"""simple docstring"""
_lowerCAmelCase : Dict = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
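# Usage sketch for `find_executable_batch_size` imported above: it retries the wrapped
# function with a halved batch size whenever a CUDA out-of-memory error is raised. The
# function body and starting size below are illustrative, not part of this module.
#
#     from accelerate.utils import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # build dataloaders with `batch_size` and run the training loop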
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float:
'''simple docstring'''
_validate_point(_lowerCamelCase )
_validate_point(_lowerCamelCase )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(_lowerCamelCase , _lowerCamelCase ) ) )
def lowerCamelCase_( _lowerCamelCase ) -> None:
'''simple docstring'''
if point:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
for item in point:
if not isinstance(_lowerCamelCase , (int, float) ):
_lowerCamelCase : Dict = (
"Expected a list of numbers as input, found "
F"""{type(_lowerCamelCase ).__name__}"""
)
raise TypeError(_lowerCamelCase )
else:
_lowerCamelCase : Optional[int] = F"""Expected a list of numbers as input, found {type(_lowerCamelCase ).__name__}"""
raise TypeError(_lowerCamelCase )
else:
raise ValueError("Missing an input" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float:
'''simple docstring'''
_validate_point(_lowerCamelCase )
_validate_point(_lowerCamelCase )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(_lowerCamelCase , _lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 340 | 1 |
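# Usage sketch: for (1, 1) and (9, 9) the distance is |1 - 9| + |1 - 9| = 16.
#
#     print(manhattan_distance([1, 1], [9, 9]))            # 16.0
#     print(manhattan_distance_one_liner([1, 1], [9, 9]))  # 16.0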
"""simple docstring"""
from manim import *
class Stage2(Scene):  # original scene name lost to obfuscation; "Stage2" is inferred from the narration text
    def construct(self):  # `construct` is the entry point manim expects on a Scene
_lowerCamelCase : Dict = Rectangle(height=0.5 ,width=0.5 )
_lowerCamelCase : Union[str, Any] = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
_lowerCamelCase : str = [mem.copy() for i in range(6 )]
_lowerCamelCase : List[Any] = [mem.copy() for i in range(6 )]
_lowerCamelCase : str = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase ,buff=0 )
_lowerCamelCase : Tuple = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase ,buff=0 )
_lowerCamelCase : Tuple = VGroup(__lowerCAmelCase ,__lowerCAmelCase ).arrange(__lowerCAmelCase ,buff=0 )
_lowerCamelCase : Optional[Any] = Text("CPU" ,font_size=24 )
_lowerCamelCase : int = Group(__lowerCAmelCase ,__lowerCAmelCase ).arrange(__lowerCAmelCase ,buff=0.5 ,aligned_edge=__lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [mem.copy() for i in range(4 )]
_lowerCamelCase : Any = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase ,buff=0 )
_lowerCamelCase : Optional[Any] = Text("GPU" ,font_size=24 )
_lowerCamelCase : Optional[Any] = Group(__lowerCAmelCase ,__lowerCAmelCase ).arrange(__lowerCAmelCase ,buff=0.5 ,aligned_edge=__lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
_lowerCamelCase : str = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase ,buff=0 )
_lowerCamelCase : Dict = Text("Model" ,font_size=24 )
_lowerCamelCase : Optional[int] = Group(__lowerCAmelCase ,__lowerCAmelCase ).arrange(__lowerCAmelCase ,buff=0.5 ,aligned_edge=__lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCAmelCase )
_lowerCamelCase : List[str] = []
for i, rect in enumerate(__lowerCAmelCase ):
rect.set_stroke(__lowerCAmelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_lowerCamelCase : Optional[Any] = Rectangle(height=0.46 / 4 ,width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase ,opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=__lowerCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] ,direction=__lowerCAmelCase ,buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] ,direction=__lowerCAmelCase ,buff=0.0 )
self.add(__lowerCAmelCase )
cpu_targs.append(__lowerCAmelCase )
_lowerCamelCase : int = [mem.copy() for i in range(6 )]
_lowerCamelCase : Union[str, Any] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase ,buff=0 )
_lowerCamelCase : Any = Text("Loaded Checkpoint" ,font_size=24 )
_lowerCamelCase : List[Any] = Group(__lowerCAmelCase ,__lowerCAmelCase ).arrange(__lowerCAmelCase ,aligned_edge=__lowerCAmelCase ,buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_lowerCamelCase : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCamelCase : Optional[int] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Optional[int] = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(__lowerCAmelCase ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
_lowerCamelCase : Tuple = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase ) ,Write(__lowerCAmelCase ) )
self.play(Write(__lowerCAmelCase ,run_time=1 ) ,Create(__lowerCAmelCase ,run_time=1 ) )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Optional[int] = []
for i, rect in enumerate(__lowerCAmelCase ):
_lowerCamelCase : str = fill.copy().set_fill(__lowerCAmelCase ,opacity=0.7 )
target.move_to(__lowerCAmelCase )
first_animations.append(GrowFromCenter(__lowerCAmelCase ,run_time=1 ) )
_lowerCamelCase : Optional[int] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__lowerCAmelCase ,run_time=1.5 ) )
self.play(*__lowerCAmelCase )
self.play(*__lowerCAmelCase )
self.wait()
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Return the writer batch size implied by `features`, or None to use the default."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write the dataset to `file_obj` as Parquet and return the number of bytes written."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
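# Usage sketch (file name illustrative): round-trip a small dataset through Parquet.
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#     ParquetDatasetWriter(ds, "tiny.parquet").write()                      # returns bytes written
#     reloaded = ParquetDatasetReader("tiny.parquet", split="train").read()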
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")

        # Clone the template and substitute the dataset's concrete ClassLabel feature.
        # The instance is frozen, so the new schema is written through __dict__.
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
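# Usage sketch (label names illustrative): aligning the template swaps in the dataset's
# concrete ClassLabel feature.
#
#     features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = ImageClassification().align_with_features(features)
#     task.column_mapping  # {"image": "image", "labels": "labels"}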
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """Create the cache directory for dynamic modules and add it to the Python path."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    """Create a submodule of the dynamic-modules cache, creating parents as needed."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    """Return every file transitively reachable through relative imports of `module_file`."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """Check that all top-level imports of `filename` resolve, then return its relative imports."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import `module_path` and return `class_name`, or auto-detect the pipeline class if None."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """Return the unique user-defined `DiffusionPipeline` subclass in `loaded_module`."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path, module_file, cache_dir=None, force_download=False, resume_download=False,
    proxies=None, use_auth_token=None, revision=None, local_files_only=False,
):
    """Download `module_file` from a Hub repo, a community-pipeline name, or a local folder, and cache it."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,  # GitHub raw files need no Hub token (value reconstructed)
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path, module_file, class_name=None, cache_dir=None, force_download=False,
    resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs,
):
    """Resolve `module_file` via `get_cached_module_file` and return the requested class from it."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token,
        revision=revision, local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
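# Usage sketch (repo id and file name hypothetical): fetch a community pipeline file, cache it
# under HF_MODULES_CACHE, and import its single DiffusionPipeline subclass.
#
#     pipeline_cls = get_class_from_dynamic_module(
#         "some-user/some-community-pipeline",  # hypothetical Hub repo
#         "pipeline.py",
#         class_name=None,  # None -> auto-detected via find_pipeline_class
#     )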
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mctct"] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
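# Usage sketch: with _LazyModule, importing the package is cheap; torch-backed classes are only
# resolved on first attribute access (assuming this file lives at models/mctct/__init__.py).
#
#     from transformers.models.mctct import MCTCTConfig  # no torch import triggered yet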
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32,
            attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True,  # assumed True; the obfuscated dump dropped this value
            dynamic_thresholding_ratio=0.95, sample_max_value=1.0,
            prediction_type="epsilon", variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32,
            attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift",
            act_fn="gelu", class_embed_type="timestep", mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu", time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True,  # assumed True, matching the scheduler above
            dynamic_thresholding_ratio=0.95, sample_max_value=1.0,
            prediction_type="epsilon", variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
_lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
class A_ ( _a ):
lowerCAmelCase__ = 'masked_bert'
def __init__( self: Union[str, Any] ,__lowerCAmelCase: Dict=30_522 ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Dict=12 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: List[Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: str=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-12 ,__lowerCAmelCase: Union[str, Any]=0 ,__lowerCAmelCase: List[Any]="topK" ,__lowerCAmelCase: Optional[Any]="constant" ,__lowerCAmelCase: Optional[Any]=0.0 ,**__lowerCAmelCase: str ,):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : str = max_position_embeddings
_lowerCamelCase : List[str] = type_vocab_size
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : List[Any] = layer_norm_eps
_lowerCamelCase : int = pruning_method
_lowerCamelCase : str = mask_init
_lowerCamelCase : List[Any] = mask_scale | 340 | 1 |
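# Usage sketch: the pruning-specific fields ride alongside the standard BERT hyperparameters.
#
#     config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
#     config.model_type  # "masked_bert"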
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self, vocab_size=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation="gelu",
        untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=512,
        reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False,
        summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1,
        start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
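# Usage sketch: build a fake batch of token ids plus a mask whose last column is always attended.
#
#     input_ids = ids_tensor((2, 8), vocab_size=99)  # shape (2, 8), values in [0, 98]
#     attn_mask = random_attention_mask((2, 8))      # 0/1 mask with attn_mask[:, -1] == 1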
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 2
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=(2, 4),
            use_linear_projection=True,  # assumed True; the obfuscated dump dropped this value
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
def _lowercase ( self: Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_lowerCamelCase : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_lowerCamelCase : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
_lowerCamelCase : Union[str, Any] = "stabilityai/stable-diffusion-2-inpainting"
_lowerCamelCase : List[str] = StableDiffusionInpaintPipeline.from_pretrained(__lowerCAmelCase ,safety_checker=__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_lowerCamelCase : str = "Face of a yellow cat, high resolution, sitting on a park bench"
_lowerCamelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCamelCase : int = pipe(
prompt=__lowerCAmelCase ,image=__lowerCAmelCase ,mask_image=__lowerCAmelCase ,generator=__lowerCAmelCase ,output_type="np" ,)
_lowerCamelCase : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_lowerCamelCase : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_lowerCamelCase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
_lowerCamelCase : int = "stabilityai/stable-diffusion-2-inpainting"
_lowerCamelCase : Any = StableDiffusionInpaintPipeline.from_pretrained(
__lowerCAmelCase ,torch_dtype=torch.floataa ,safety_checker=__lowerCAmelCase ,)
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_lowerCamelCase : Tuple = "Face of a yellow cat, high resolution, sitting on a park bench"
_lowerCamelCase : Tuple = torch.manual_seed(0 )
_lowerCamelCase : Dict = pipe(
prompt=__lowerCAmelCase ,image=__lowerCAmelCase ,mask_image=__lowerCAmelCase ,generator=__lowerCAmelCase ,output_type="np" ,)
_lowerCamelCase : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowercase ( self: int ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCamelCase : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_lowerCamelCase : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_lowerCamelCase : Any = "stabilityai/stable-diffusion-2-inpainting"
_lowerCamelCase : List[Any] = PNDMScheduler.from_pretrained(__lowerCAmelCase ,subfolder="scheduler" )
_lowerCamelCase : Dict = StableDiffusionInpaintPipeline.from_pretrained(
__lowerCAmelCase ,safety_checker=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,torch_dtype=torch.floataa ,)
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
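# With attention slicing at its finest granularity plus sequential CPU
# offload, at most one submodule is resident on the GPU at a time, which
# is what keeps peak allocation under the 2.65 GB ceiling asserted below.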
_lowerCamelCase : Dict = "Face of a yellow cat, high resolution, sitting on a park bench"
_lowerCamelCase : Optional[int] = torch.manual_seed(0 )
_lowerCamelCase : Dict = pipe(
prompt=__lowerCAmelCase ,image=__lowerCAmelCase ,mask_image=__lowerCAmelCase ,generator=__lowerCAmelCase ,num_inference_steps=2 ,output_type="np" ,)
_lowerCamelCase : str = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9 | 340 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class A_ ( _a ):
lowerCAmelCase__ = 'mobilenet_v1'
def __init__( self: Tuple ,__lowerCAmelCase: int=3 ,__lowerCAmelCase: Dict=224 ,__lowerCAmelCase: int=1.0 ,__lowerCAmelCase: Tuple=8 ,__lowerCAmelCase: List[str]="relu6" ,__lowerCAmelCase: int=True ,__lowerCAmelCase: List[Any]=0.9_99 ,__lowerCAmelCase: Optional[int]=0.02 ,__lowerCAmelCase: Optional[int]=0.0_01 ,**__lowerCAmelCase: str ,):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
_lowerCamelCase : List[str] = num_channels
_lowerCamelCase : Union[str, Any] = image_size
_lowerCamelCase : List[Any] = depth_multiplier
_lowerCamelCase : Any = min_depth
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : Dict = tf_padding
_lowerCamelCase : Union[str, Any] = classifier_dropout_prob
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : List[Any] = layer_norm_eps
class A_ ( _a ):
lowerCAmelCase__ = version.parse('1.11' )
@property
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def _lowercase ( self: Any ):
'''simple docstring'''
return 1e-4 | 340 | 1 |
"""simple docstring"""
import os
from math import logaa
def lowerCamelCase_( _lowerCamelCase = "base_exp.txt" ) -> int:
'''simple docstring'''
_lowerCamelCase : float = 0
_lowerCamelCase : List[Any] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(_lowerCamelCase ) , _lowerCamelCase ) ) ):
_lowerCamelCase, _lowerCamelCase : Dict = list(map(_lowerCamelCase , line.split("," ) ) )
if x * logaa(_lowerCamelCase ) > largest:
_lowerCamelCase : Union[str, Any] = x * logaa(_lowerCamelCase )
_lowerCamelCase : List[Any] = i + 1
return result
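# The comparison works because log10(base**exp) == exp * log10(base), so the
# row maximizing exp * log10(base) names the largest power without ever
# computing base**exp. Hypothetical rows (not from the data file): for
# "519,2283" vs "632,2116", 2283 * log10(519) ~= 6198 beats
# 2116 * log10(632) ~= 5926.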
if __name__ == "__main__":
print(solution()) | 340 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowerCamelCase_( _lowerCamelCase ) -> Any:
'''simple docstring'''
for param in module.parameters():
_lowerCamelCase : Optional[int] = False
def lowerCamelCase_( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
_lowerCamelCase : int = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
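# Minimal usage sketch (assumed, not from this module): the device helper
# above returns "cuda", "mps" (after printing the warning), or "cpu", and a
# caller would typically do
#   device = get_device()  # hypothetical name for the helper above
#   model.to(device)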
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Dict = plt.imshow(_lowerCamelCase )
fig.axes.get_xaxis().set_visible(_lowerCamelCase )
fig.axes.get_yaxis().set_visible(_lowerCamelCase )
plt.show()
def lowerCamelCase_( ) -> str:
'''simple docstring'''
_lowerCamelCase : Tuple = datetime.now()
_lowerCamelCase : Tuple = current_time.strftime("%H:%M:%S" )
return timestamp | 340 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def lowerCamelCase_( _lowerCamelCase ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_lowerCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
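# Hand-checked illustration of the 6k +/- 1 trial division above (values not
# from the source): for 97 the loop only runs i = 5 (testing divisors 5 and
# 7, both <= sqrt(97) ~= 9.85) and finds none, hence prime; 91 = 7 * 13
# fails immediately at i = 5 because 91 % (5 + 2) == 0.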
_lowerCAmelCase : Optional[int] = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def lowerCamelCase_( _lowerCamelCase ) -> list[int]:
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
_lowerCamelCase : Optional[int] = []
for num in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Union[str, Any] = 0
while 2 * i * i <= odd_composites[num]:
_lowerCamelCase : Optional[int] = odd_composites[num] - 2 * i * i
if is_prime(_lowerCamelCase ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(_lowerCamelCase ) == n:
return list_nums
return []
def lowerCamelCase_( ) -> int:
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f'''{solution() = }''') | 340 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : int = len(_lowerCamelCase )
_lowerCamelCase : int = len(_lowerCamelCase )
_lowerCamelCase : int = (
first_str_length if first_str_length > second_str_length else second_str_length
)
_lowerCamelCase : list = []
for char_count in range(_lowerCamelCase ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(_lowerCamelCase )
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''') | 340 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
_lowerCAmelCase : Any = logging.getLogger(__name__)
_lowerCAmelCase : Tuple = {'''facebook/bart-base''': BartForConditionalGeneration}
_lowerCAmelCase : Union[str, Any] = {'''facebook/bart-base''': BartTokenizer}
def lowerCamelCase_( ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : Tuple = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
parser.add_argument(
"--validation_file" , type=_lowerCamelCase , default=_lowerCamelCase , help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length" , type=_lowerCamelCase , default=5 , help="The maximum total input sequence length after tokenization." , )
parser.add_argument(
"--num_beams" , type=_lowerCamelCase , default=_lowerCamelCase , help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
) , )
parser.add_argument(
"--model_name_or_path" , type=_lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , required=_lowerCamelCase , )
parser.add_argument(
"--config_name" , type=_lowerCamelCase , default=_lowerCamelCase , help="Pretrained config name or path if not the same as model_name" , )
parser.add_argument(
"--device" , type=_lowerCamelCase , default="cpu" , help="Device where the model will be run" , )
parser.add_argument("--output_file_path" , type=_lowerCamelCase , default=_lowerCamelCase , help="Where to store the final ONNX file." )
_lowerCamelCase : Dict = parser.parse_args()
return args
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase="cpu" ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = model_dict[model_name].from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
_lowerCamelCase : Tuple = tokenizer_dict[model_name].from_pretrained(_lowerCamelCase )
if model_name in ["facebook/bart-base"]:
_lowerCamelCase : Any = 0
_lowerCamelCase : int = None
_lowerCamelCase : Union[str, Any] = 0
return huggingface_model, tokenizer
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
'''simple docstring'''
model.eval()
_lowerCamelCase : Tuple = None
_lowerCamelCase : str = torch.jit.script(BARTBeamSearchGenerator(_lowerCamelCase ) )
with torch.no_grad():
_lowerCamelCase : int = "My friends are cool but they eat too many carbs."
_lowerCamelCase : Dict = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="pt" ).to(model.device )
_lowerCamelCase : str = model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=_lowerCamelCase , max_length=_lowerCamelCase , early_stopping=_lowerCamelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_lowerCamelCase , (
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _lowerCamelCase , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
} , example_outputs=_lowerCamelCase , )
logger.info("Model exported to {}".format(_lowerCamelCase ) )
_lowerCamelCase : List[Any] = remove_dup_initializers(os.path.abspath(_lowerCamelCase ) )
logger.info("Deduplicated and optimized model written to {}".format(_lowerCamelCase ) )
_lowerCamelCase : List[str] = onnxruntime.InferenceSession(_lowerCamelCase )
_lowerCamelCase : Optional[Any] = ort_sess.run(
_lowerCamelCase , {
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(_lowerCamelCase ),
"max_length": np.array(_lowerCamelCase ),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
def lowerCamelCase_( ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = parse_args()
_lowerCamelCase : Optional[Any] = 5
_lowerCamelCase : List[Any] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowerCamelCase : Optional[Any] = torch.device(args.device )
_lowerCamelCase, _lowerCamelCase : List[Any] = load_model_tokenizer(args.model_name_or_path , _lowerCamelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
model.to(_lowerCamelCase )
if args.max_length:
_lowerCamelCase : Optional[Any] = args.max_length
if args.num_beams:
_lowerCamelCase : Tuple = args.num_beams
if args.output_file_path:
_lowerCamelCase : int = args.output_file_path
else:
_lowerCamelCase : Dict = "BART.onnx"
logger.info("Exporting model to ONNX" )
export_and_validate_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main() | 340 |
"""simple docstring"""
_lowerCAmelCase : Tuple = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : Any = [False] * len(_lowerCamelCase )
_lowerCamelCase : Union[str, Any] = [s]
_lowerCamelCase : str = True
while queue:
_lowerCamelCase : Optional[int] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_lowerCamelCase )
_lowerCamelCase : Any = True
_lowerCamelCase : Any = u
return visited[t]
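# Together these two functions implement the Edmonds-Karp flavor of
# Ford-Fulkerson: bfs finds an augmenting path in the residual graph
# (recorded via parent), the caller saturates it, and edges that were
# positive in the original capacity matrix but are zero in the residual
# graph form the minimum cut.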
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : List[str] = [-1] * (len(_lowerCamelCase ))
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : List[str] = [i[:] for i in graph] # Record original cut, copy.
while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Any = float("Inf" )
_lowerCamelCase : Dict = sink
while s != source:
# Find the minimum value in select path
_lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , graph[parent[s]][s] )
_lowerCamelCase : Union[str, Any] = parent[s]
max_flow += path_flow
_lowerCamelCase : Optional[Any] = sink
while v != source:
_lowerCamelCase : Union[str, Any] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_lowerCamelCase : List[str] = parent[v]
for i in range(len(_lowerCamelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5)) | 340 | 1 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
_lowerCAmelCase : List[str] = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"] ):
continue
item.add_marker(pytest.mark.unit )
def lowerCamelCase_( _lowerCamelCase ) -> Dict:
'''simple docstring'''
config.addinivalue_line("markers" , "torchaudio_latest: mark test to run with torchaudio>=0.12" )
@pytest.fixture(autouse=_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : Tuple = tmp_path_factory.getbasetemp() / "cache"
_lowerCamelCase : Dict = test_hf_cache_home / "datasets"
_lowerCamelCase : int = test_hf_cache_home / "metrics"
_lowerCamelCase : List[str] = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE" , str(_lowerCamelCase ) )
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE" , str(_lowerCamelCase ) )
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE" , str(_lowerCamelCase ) )
_lowerCamelCase : List[Any] = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH" , str(_lowerCamelCase ) )
_lowerCamelCase : Optional[int] = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_lowerCamelCase ) )
@pytest.fixture(autouse=_lowerCamelCase , scope="session" )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
datasets.disable_progress_bar()
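# The autouse fixtures above sandbox every datasets cache location under
# pytest's tmp_path_factory directory, so the suite never reads from or
# writes to the real ~/.cache/huggingface tree, and the session-scoped one
# disables progress bars for the whole run.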
@pytest.fixture(autouse=_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS" , _lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING" , _lowerCamelCase ) | 340 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class A_ ( _a ):
lowerCAmelCase__ = 'camembert'
def __init__( self: Tuple ,__lowerCAmelCase: Union[str, Any]=30_522 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Union[str, Any]=12 ,__lowerCAmelCase: int=12 ,__lowerCAmelCase: Optional[int]=3_072 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: int=512 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Any="absolute" ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: Dict ,):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : List[str] = vocab_size
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : Union[str, Any] = layer_norm_eps
_lowerCamelCase : Tuple = position_embedding_type
_lowerCamelCase : List[Any] = use_cache
_lowerCamelCase : Dict = classifier_dropout
class A_ ( _a ):
@property
def _lowercase ( self: Any ):
'''simple docstring'''
if self.task == "multiple-choice":
_lowerCamelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
_lowerCamelCase : int = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] ) | 340 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
def lowerCamelCase_( _lowerCamelCase ):
'''simple docstring'''
_lowerCamelCase : str = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
_lowerCamelCase : str = 128
elif "12-12" in model_name:
_lowerCamelCase : str = 12
_lowerCamelCase : List[str] = 12
elif "14-14" in model_name:
_lowerCamelCase : Union[str, Any] = 14
_lowerCamelCase : Dict = 14
elif "16-16" in model_name:
_lowerCamelCase : int = 16
_lowerCamelCase : List[Any] = 16
else:
raise ValueError("Model not supported" )
_lowerCamelCase : Optional[Any] = 'huggingface/label-files'
if "speech-commands" in model_name:
_lowerCamelCase : Optional[int] = 35
_lowerCamelCase : Union[str, Any] = 'speech-commands-v2-id2label.json'
else:
_lowerCamelCase : Union[str, Any] = 527
_lowerCamelCase : Tuple = 'audioset-id2label.json'
_lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : int = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Tuple = idalabel
_lowerCamelCase : int = {v: k for k, v in idalabel.items()}
return config
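# Summary of the branches above (the specific config attribute names are an
# inference, not spelled out by the assignments): "10-10" AudioSet
# checkpoints keep the stock ASTConfig patch strides, speech-commands
# shrinks the spectrogram length to 128 frames, and the 12/14/16 variants
# set matching time and frequency strides.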
def lowerCamelCase_( _lowerCamelCase ):
'''simple docstring'''
if "module.v" in name:
_lowerCamelCase : str = name.replace("module.v" , "audio_spectrogram_transformer" )
if "cls_token" in name:
_lowerCamelCase : List[Any] = name.replace("cls_token" , "embeddings.cls_token" )
if "dist_token" in name:
_lowerCamelCase : List[Any] = name.replace("dist_token" , "embeddings.distillation_token" )
if "pos_embed" in name:
_lowerCamelCase : Optional[int] = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
_lowerCamelCase : List[Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
_lowerCamelCase : Dict = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
_lowerCamelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
_lowerCamelCase : Dict = name.replace("attn" , "attention.self" )
if "norm1" in name:
_lowerCamelCase : Dict = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
_lowerCamelCase : List[str] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
_lowerCamelCase : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
_lowerCamelCase : Optional[int] = name.replace("mlp.fc2" , "output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
_lowerCamelCase : int = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
_lowerCamelCase : Union[str, Any] = name.replace("module.mlp_head.0" , "classifier.layernorm" )
if "module.mlp_head.1" in name:
_lowerCamelCase : Tuple = name.replace("module.mlp_head.1" , "classifier.dense" )
return name
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_lowerCamelCase : int = orig_state_dict.pop(_UpperCAmelCase )
if "qkv" in key:
_lowerCamelCase : Union[str, Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Union[str, Any] = config.hidden_size
if "weight" in key:
_lowerCamelCase : Optional[Any] = val[:dim, :]
_lowerCamelCase : Optional[int] = val[dim : dim * 2, :]
_lowerCamelCase : Any = val[-dim:, :]
else:
_lowerCamelCase : int = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : str = val[-dim:]
else:
_lowerCamelCase : int = val
return orig_state_dict
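# Standard ViT-style qkv split: for each fused projection, rows [:dim]
# become the query weights, rows [dim : 2 * dim] the key weights, and the
# last dim rows the value weights of the encoder layer indexed by
# key_split[3] (same slicing applies to the biases).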
def lowerCamelCase_( _lowerCamelCase ):
'''simple docstring'''
_lowerCamelCase : List[Any] = [
'module.v.head.weight',
'module.v.head.bias',
'module.v.head_dist.weight',
'module.v.head_dist.bias',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCamelCase : Dict = get_audio_spectrogram_transformer_config(_UpperCAmelCase )
_lowerCamelCase : Union[str, Any] = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
# load original state_dict
_lowerCamelCase : Any = model_name_to_url[model_name]
_lowerCamelCase : Dict = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location="cpu" )
# remove some keys
remove_keys(_UpperCAmelCase )
# rename some keys
_lowerCamelCase : List[Any] = convert_state_dict(_UpperCAmelCase , _UpperCAmelCase )
# load 🤗 model
_lowerCamelCase : Any = ASTForAudioClassification(_UpperCAmelCase )
model.eval()
model.load_state_dict(_UpperCAmelCase )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
_lowerCamelCase : Dict = -4.2_6_7_7_3_9_3 if 'speech-commands' not in model_name else -6.8_4_5_9_7_8
_lowerCamelCase : Dict = 4.5_6_8_9_9_7_4 if 'speech-commands' not in model_name else 5.5_6_5_4_5_2_6
_lowerCamelCase : Tuple = 1024 if 'speech-commands' not in model_name else 128
_lowerCamelCase : Tuple = ASTFeatureExtractor(mean=_UpperCAmelCase , std=_UpperCAmelCase , max_length=_UpperCAmelCase )
if "speech-commands" in model_name:
_lowerCamelCase : int = load_dataset("speech_commands" , "v0.02" , split="validation" )
_lowerCamelCase : Optional[int] = dataset[0]['audio']['array']
else:
_lowerCamelCase : List[str] = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
_lowerCamelCase, _lowerCamelCase : Optional[int] = torchaudio.load(_UpperCAmelCase )
_lowerCamelCase : Union[str, Any] = waveform.squeeze().numpy()
_lowerCamelCase : Optional[Any] = feature_extractor(_UpperCAmelCase , sampling_rate=16000 , return_tensors="pt" )
# forward pass
_lowerCamelCase : Any = model(**_UpperCAmelCase )
_lowerCamelCase : str = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
_lowerCamelCase : Tuple = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
_lowerCamelCase : Union[str, Any] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
_lowerCamelCase : Dict = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
_lowerCamelCase : Optional[Any] = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
_lowerCamelCase : List[Any] = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
_lowerCamelCase : Optional[int] = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
_lowerCamelCase : List[Any] = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] )
elif model_name == "ast-finetuned-speech-commands-v2":
_lowerCamelCase : Dict = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1e-4 ):
raise ValueError("Logits don\'t match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCAmelCase )
print(F"""Saving feature extractor to {pytorch_dump_folder_path}""" )
feature_extractor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(F"""MIT/{model_name}""" )
feature_extractor.push_to_hub(F"""MIT/{model_name}""" )
if __name__ == "__main__":
_lowerCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_lowerCAmelCase : Dict = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 350 |
"""simple docstring"""
from collections import defaultdict
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Optional[int] = 1
_lowerCamelCase : str = True
for v in tree[start]:
if v not in visited:
ret += dfs(_lowerCamelCase )
if ret % 2 == 0:
cuts.append(_lowerCamelCase )
return ret
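# dfs returns the size of the subtree rooted at its argument; whenever that
# size is even, the edge back to the parent can be removed while leaving two
# even components, so the vertex is recorded in cuts. The answer printed
# below is len(cuts) - 1 because the root also passes the even-size test but
# has no parent edge to cut.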
def lowerCamelCase_( ) -> None:
'''simple docstring'''
dfs(1 )
if __name__ == "__main__":
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = 10, 9
_lowerCAmelCase : str = defaultdict(list)
_lowerCAmelCase : dict[int, bool] = {}
_lowerCAmelCase : list[int] = []
_lowerCAmelCase : Any = 0
_lowerCAmelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1) | 340 | 0 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
_lowerCAmelCase : Optional[int] = numpy.array([0, 0])
_lowerCAmelCase : List[Any] = numpy.array([0.5, 0.8_660_254])
_lowerCAmelCase : Any = numpy.array([1, 0])
_lowerCAmelCase : Optional[int] = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[numpy.ndarray]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = initial_vectors
for _ in range(lowercase__ ):
_lowerCamelCase : List[Any] = iteration_step(lowercase__ )
return vectors
def lowerCamelCase_( _lowerCamelCase ) -> list[numpy.ndarray]:
'''simple docstring'''
_lowerCamelCase : Tuple = []
for i, start_vector in enumerate(vectors[:-1] ):
_lowerCamelCase : List[Any] = vectors[i + 1]
new_vectors.append(lowercase__ )
_lowerCamelCase : str = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
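# Classic Koch rule realized above: every segment is replaced by four (keep
# the first third, add the two sides of an outward equilateral bump rotated
# by 60 degrees, keep the last third), so n iterations grow the initial 3
# segments into 3 * 4**n.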
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> numpy.ndarray:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = numpy.radians(lowercase__ )
_lowerCamelCase, _lowerCamelCase : Tuple = numpy.cos(lowercase__ ), numpy.sin(lowercase__ )
_lowerCamelCase : Tuple = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowercase__ , lowercase__ )
def lowerCamelCase_( _lowerCamelCase ) -> None:
'''simple docstring'''
_lowerCamelCase : Tuple = plt.gca()
axes.set_aspect("equal" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_lowerCamelCase : Any = zip(*lowercase__ )
plt.plot(lowercase__ , lowercase__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Any = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors) | 351 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_lowerCAmelCase : Optional[int] = '''__DUMMY_TRANSFORMERS_USER__'''
_lowerCAmelCase : Dict = '''Dummy User'''
_lowerCAmelCase : Optional[int] = '''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'''
_lowerCAmelCase : Tuple = '''https://hub-ci.huggingface.co'''
_lowerCAmelCase : Any = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
_lowerCAmelCase : Tuple = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
_lowerCAmelCase : Dict = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , _lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Tuple:
'''simple docstring'''
monkeypatch.setattr("datasets.config.HF_ENDPOINT" , _lowerCamelCase )
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , _lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , _lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
HfFolder.save_token(_lowerCamelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="session" )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
return HfApi(endpoint=_lowerCamelCase )
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Dict = HfFolder.get_token()
HfFolder.save_token(_lowerCamelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
def _cleanup_repo(_lowerCamelCase ):
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
return _cleanup_repo
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> List[str]:
'''simple docstring'''
@contextmanager
def _temporary_repo(_lowerCamelCase ):
try:
yield repo_id
finally:
cleanup_repo(_lowerCamelCase )
return _temporary_repo
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : Tuple = F"""repo_txt_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : List[str] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : Dict = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Any = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : List[Any] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_ | 340 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( _UpperCAmelCase ):
lowerCAmelCase__ = ["image_processor", "tokenizer"]
lowerCAmelCase__ = "ViTImageProcessor"
lowerCAmelCase__ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self: Tuple ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Any=None ,**__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." ,_UpperCAmelCase ,)
_lowerCamelCase : Optional[int] = kwargs.pop("feature_extractor" )
_lowerCamelCase : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase ,_UpperCAmelCase )
def __call__( self: int ,__lowerCAmelCase: Dict=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: int=None ,__lowerCAmelCase: List[Any]=None ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
_lowerCamelCase : Optional[Any] = self.tokenizer(_UpperCAmelCase ,return_tensors=_UpperCAmelCase ,**_UpperCAmelCase )
if visual_prompt is not None:
_lowerCamelCase : str = self.image_processor(_UpperCAmelCase ,return_tensors=_UpperCAmelCase ,**_UpperCAmelCase )
if images is not None:
_lowerCamelCase : Tuple = self.image_processor(_UpperCAmelCase ,return_tensors=_UpperCAmelCase ,**_UpperCAmelCase )
if visual_prompt is not None and images is not None:
_lowerCamelCase : Any = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
_lowerCamelCase : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
_lowerCamelCase : Union[str, Any] = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) ,tensor_type=_UpperCAmelCase )
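# Hedged usage sketch (checkpoint name assumed, not taken from this file):
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   batch = processor(text=["a cat"], images=image, return_tensors="pt")
# which exercises the text+images branch above and returns input_ids,
# attention_mask, and pixel_values in one encoding.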
def _lowercase ( self: int ,*__lowerCAmelCase: int ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_UpperCAmelCase ,**_UpperCAmelCase )
def _lowercase ( self: Any ,*__lowerCAmelCase: Any ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*_UpperCAmelCase ,**_UpperCAmelCase )
@property
def _lowercase ( self: List[Any] ):
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." ,_UpperCAmelCase ,)
return self.image_processor_class
@property
def _lowercase ( self: List[str] ):
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." ,_UpperCAmelCase ,)
return self.image_processor | 352 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class A_ ( _a ):
def __init__( self: List[Any] ,__lowerCAmelCase: Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
'''simple docstring'''
super().__init__()
_lowerCamelCase : Tuple = nn.ModuleList(__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Union[torch.Tensor, float, int] ,__lowerCAmelCase: torch.Tensor ,__lowerCAmelCase: List[torch.tensor] ,__lowerCAmelCase: List[float] ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[Dict[str, Any]] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(__lowerCAmelCase ,__lowerCAmelCase ,self.nets ) ):
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = controlnet(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,)
# merge samples
if i == 0:
_lowerCamelCase, _lowerCamelCase : Optional[Any] = down_samples, mid_sample
else:
_lowerCamelCase : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(__lowerCAmelCase ,__lowerCAmelCase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
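# The forward above fans the same latent/timestep out to every nested
# controlnet with its own conditioning image and scale, then element-wise
# sums the per-resolution down-block residuals and the mid-block residual,
# so the UNet consumes one merged set of residuals no matter how many
# controlnets are stacked.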
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Union[str, os.PathLike] ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Callable = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[str] = None ,):
'''simple docstring'''
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : str = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
__lowerCAmelCase ,is_main_process=__lowerCAmelCase ,save_function=__lowerCAmelCase ,safe_serialization=__lowerCAmelCase ,variant=__lowerCAmelCase ,)
idx += 1
_lowerCamelCase : int = model_path_to_save + F"""_{idx}"""
@classmethod
def _lowercase ( cls: Any ,__lowerCAmelCase: Optional[Union[str, os.PathLike]] ,**__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : int = 0
_lowerCamelCase : str = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_lowerCamelCase : Dict = pretrained_model_path
while os.path.isdir(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = ControlNetModel.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase )
controlnets.append(__lowerCAmelCase )
idx += 1
_lowerCamelCase : Tuple = pretrained_model_path + F"""_{idx}"""
logger.info(F"""{len(__lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.""" )
if len(__lowerCAmelCase ) == 0:
raise ValueError(
F"""No ControlNets found under {os.path.dirname(__lowerCAmelCase )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(__lowerCAmelCase ) | 340 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class A_ ( A__ ):
lowerCAmelCase__ = 'luke'
def __init__( self: Dict ,__lowerCAmelCase: Any=50_267 ,__lowerCAmelCase: List[str]=500_000 ,__lowerCAmelCase: Any=768 ,__lowerCAmelCase: Dict=256 ,__lowerCAmelCase: Optional[int]=12 ,__lowerCAmelCase: str=12 ,__lowerCAmelCase: int=3_072 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Any=512 ,__lowerCAmelCase: Tuple=2 ,__lowerCAmelCase: str=0.02 ,__lowerCAmelCase: str=1e-12 ,__lowerCAmelCase: Any=True ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: str=1 ,__lowerCAmelCase: List[str]=0 ,__lowerCAmelCase: Optional[int]=2 ,**__lowerCAmelCase: List[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=__snake_case ,bos_token_id=__snake_case ,eos_token_id=__snake_case ,**__snake_case )
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : Any = entity_vocab_size
_lowerCamelCase : int = hidden_size
_lowerCamelCase : Optional[int] = entity_emb_size
_lowerCamelCase : Optional[Any] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : List[str] = type_vocab_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = layer_norm_eps
_lowerCamelCase : str = use_entity_aware_attention
_lowerCamelCase : Optional[int] = classifier_dropout | 353 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : int = logging.get_logger(__name__)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Tuple = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head" ):
_lowerCamelCase : Tuple = "segformer.encoder." + key
if key.startswith("backbone" ):
_lowerCamelCase : Any = key.replace("backbone" , "segformer.encoder" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_lowerCamelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )]
_lowerCamelCase : int = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_lowerCamelCase )-1}""" )
if "norm" in key:
_lowerCamelCase : Optional[Any] = key.replace("norm" , "layer_norm" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_lowerCamelCase : Dict = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
_lowerCamelCase : Tuple = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_lowerCamelCase )-1}""" )
if "layer_norm1" in key:
_lowerCamelCase : Union[str, Any] = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
_lowerCamelCase : int = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
_lowerCamelCase : Union[str, Any] = key[key.find("block" ) + len("block" )]
_lowerCamelCase : Optional[Any] = key.replace(F"""block{idx}""" , F"""block.{int(_lowerCamelCase )-1}""" )
if "attn.q" in key:
_lowerCamelCase : Optional[int] = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
_lowerCamelCase : List[str] = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
_lowerCamelCase : Tuple = key.replace("attn" , "attention.self" )
if "fc1" in key:
_lowerCamelCase : Optional[Any] = key.replace("fc1" , "dense1" )
if "fc2" in key:
_lowerCamelCase : Dict = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
_lowerCamelCase : int = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
_lowerCamelCase : str = key.replace("linear_fuse.conv" , "linear_fuse" )
_lowerCamelCase : Optional[Any] = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_lowerCamelCase : Union[str, Any] = key[key.find("linear_c" ) + len("linear_c" )]
_lowerCamelCase : Optional[int] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_lowerCamelCase )-1}""" )
if key.startswith("head" ):
_lowerCamelCase : List[str] = key.replace("head" , "classifier" )
_lowerCamelCase : Union[str, Any] = value
return new_state_dict
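# rename_keys is a pure name-translation pass over the checkpoint: tensor
# values are untouched, the original names (patch_embed1, block1, attn.q,
# linear_c4, ...) are mapped onto the transformers layout, and every 1-based
# suffix is shifted to a 0-based module index.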
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
_lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
_lowerCamelCase : int = kv_weight[
: config.hidden_sizes[i], :
]
_lowerCamelCase : int = kv_bias[: config.hidden_sizes[i]]
_lowerCamelCase : Optional[int] = kv_weight[
config.hidden_sizes[i] :, :
]
_lowerCamelCase : Optional[Any] = kv_bias[
config.hidden_sizes[i] :
]
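# The original SegFormer checkpoint fuses keys and values into a single kv
# projection per attention layer; the slicing above assigns the first
# hidden_sizes[i] rows to the key weight/bias and the remaining rows to the
# value weight/bias.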
def lowerCamelCase_( ) -> Dict:
'''simple docstring'''
_lowerCamelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : Any = SegformerConfig()
_lowerCamelCase : int = False
# set attributes based on model_name
_lowerCamelCase : Any = "huggingface/label-files"
if "segformer" in model_name:
_lowerCamelCase : str = model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
_lowerCamelCase : str = 150
_lowerCamelCase : Dict = "ade20k-id2label.json"
_lowerCamelCase : Dict = (1, 150, 128, 128)
elif "city" in model_name:
_lowerCamelCase : List[str] = 19
_lowerCamelCase : Tuple = "cityscapes-id2label.json"
_lowerCamelCase : Tuple = (1, 19, 128, 128)
else:
raise ValueError(F"""Model {model_name} not supported""" )
elif "mit" in model_name:
_lowerCamelCase : List[str] = True
_lowerCamelCase : Tuple = model_name[4:6]
_lowerCamelCase : Tuple = 1000
_lowerCamelCase : List[Any] = "imagenet-1k-id2label.json"
_lowerCamelCase : List[Any] = (1, 1000)
else:
raise ValueError(F"""Model {model_name} not supported""" )
# set config attributes
_lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = idalabel
_lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
_lowerCamelCase : int = [64, 128, 320, 512]
_lowerCamelCase : int = 256
elif size == "b2":
_lowerCamelCase : Tuple = [64, 128, 320, 512]
_lowerCamelCase : List[Any] = 768
_lowerCamelCase : Any = [3, 4, 6, 3]
elif size == "b3":
_lowerCamelCase : Tuple = [64, 128, 320, 512]
_lowerCamelCase : Union[str, Any] = 768
_lowerCamelCase : Optional[Any] = [3, 4, 18, 3]
elif size == "b4":
_lowerCamelCase : str = [64, 128, 320, 512]
_lowerCamelCase : Optional[Any] = 768
_lowerCamelCase : Dict = [3, 8, 27, 3]
elif size == "b5":
_lowerCamelCase : int = [64, 128, 320, 512]
_lowerCamelCase : Tuple = 768
_lowerCamelCase : Tuple = [3, 6, 40, 3]
else:
raise ValueError(F"""Size {size} not supported""" )
# load image processor (only resize + normalize)
_lowerCamelCase : Dict = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase )
# prepare image
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values
logger.info(F"""Converting model {model_name}...""" )
# load original state dict
if encoder_only:
_lowerCamelCase : Tuple = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )
else:
_lowerCamelCase : int = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
_lowerCamelCase : str = rename_keys(_lowerCamelCase , encoder_only=_lowerCamelCase )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(_lowerCamelCase , _lowerCamelCase )
# create HuggingFace model and load state dict
if encoder_only:
_lowerCamelCase : Tuple = False
_lowerCamelCase : Optional[int] = SegformerForImageClassification(_lowerCamelCase )
else:
_lowerCamelCase : List[str] = SegformerForSemanticSegmentation(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
# forward pass
_lowerCamelCase : Any = model(_lowerCamelCase )
_lowerCamelCase : Dict = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
_lowerCamelCase : str = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
_lowerCamelCase : Any = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
_lowerCamelCase : int = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
_lowerCamelCase : Optional[Any] = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
_lowerCamelCase : List[str] = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
_lowerCamelCase : Any = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
_lowerCamelCase : Dict = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
_lowerCamelCase : Optional[int] = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
_lowerCamelCase : Tuple = torch.tensor(
[
[
[-1.13_72e01, -1.27_87e01, -1.34_77e01],
[-1.25_36e01, -1.41_94e01, -1.44_09e01],
[-1.32_17e01, -1.48_88e01, -1.53_27e01],
],
[
[-1.47_91e01, -1.71_22e01, -1.82_77e01],
[-1.71_63e01, -1.91_92e01, -1.95_33e01],
[-1.78_97e01, -1.99_91e01, -2.03_15e01],
],
[
[7.67_23e-01, 4.19_21e-01, -7.78_78e-02],
[4.77_72e-01, 9.55_57e-03, -2.80_82e-01],
[3.60_32e-01, -2.48_26e-01, -5.11_68e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
_lowerCamelCase : Union[str, Any] = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
_lowerCamelCase : List[Any] = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
_lowerCamelCase : Tuple = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
_lowerCamelCase : Any = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
_lowerCamelCase : List[str] = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
_lowerCamelCase : str = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
else:
 predicted_class_idx = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-2 )
# finally, save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
 parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
 args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 340 | 0 |
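# NOTE: a minimal sketch of the special treatment that `read_in_k_v` applies above. It is an
# illustration under the assumption (true for original SegFormer checkpoints) that attention
# stores key and value as one fused `kv` projection of shape (2 * hidden_size, hidden_size),
# which gets split along the output dimension into separate key/value layers. The function
# name and shapes here are illustrative, not the exact ones from the script.
import torch
def split_fused_kv(kv_weight: torch.Tensor, kv_bias: torch.Tensor, hidden_size: int):
    # first half of the output dimension is the key projection, second half the value projection
    key_w, value_w = kv_weight[:hidden_size, :], kv_weight[hidden_size:, :]
    key_b, value_b = kv_bias[:hidden_size], kv_bias[hidden_size:]
    return (key_w, key_b), (value_w, value_b)
kv_w, kv_b = torch.randn(2 * 64, 64), torch.randn(2 * 64)
(key_w, key_b), (value_w, value_b) = split_fused_kv(kv_w, kv_b, hidden_size=64)
assert key_w.shape == value_w.shape == (64, 64)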
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape( tensor_list ) -> bool:
    '''simple docstring'''
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
class A_ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase__ = StableDiffusionLatentUpscalePipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ = frozenset([] )
lowerCAmelCase__ = True
@property
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Any = 1
_lowerCamelCase : Dict = 4
_lowerCamelCase : Union[str, Any] = (16, 16)
_lowerCamelCase : str = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(_lowerCamelCase )
return image
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : int = UNetaDConditionModel(
act_fn="gelu" ,attention_head_dim=8 ,norm_num_groups=_lowerCamelCase ,block_out_channels=[32, 32, 64, 64] ,time_cond_proj_dim=160 ,conv_in_kernel=1 ,conv_out_kernel=1 ,cross_attention_dim=32 ,down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
) ,in_channels=8 ,mid_block_type=_lowerCamelCase ,only_cross_attention=_lowerCamelCase ,out_channels=5 ,resnet_time_scale_shift="scale_shift" ,time_embedding_type="fourier" ,timestep_post_act="gelu" ,up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") ,)
_lowerCamelCase : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
_lowerCamelCase : List[str] = EulerDiscreteScheduler(prediction_type="sample" )
_lowerCamelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,hidden_act="quick_gelu" ,projection_dim=512 ,)
_lowerCamelCase : List[Any] = CLIPTextModel(_lowerCamelCase )
_lowerCamelCase : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_lowerCamelCase : List[str] = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def _lowercase ( self: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Dict=0 ):
'''simple docstring'''
if str(_lowerCamelCase ).startswith("mps" ):
_lowerCamelCase : Dict = torch.manual_seed(_lowerCamelCase )
else:
_lowerCamelCase : Optional[int] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
_lowerCamelCase : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = '''cpu'''
_lowerCamelCase : str = self.get_dummy_components()
_lowerCamelCase : int = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
_lowerCamelCase : int = self.get_dummy_inputs(_lowerCamelCase )
_lowerCamelCase : List[Any] = pipe(**_lowerCamelCase ).images
_lowerCamelCase : Dict = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 256, 256, 3) )
_lowerCamelCase : Tuple = np.array(
[0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] )
_lowerCamelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCamelCase ,1e-3 )
def _lowercase ( self: Tuple ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def _lowercase ( self: int ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def _lowercase ( self: Any ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def _lowercase ( self: Any ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3e-3 )
def _lowercase ( self: Any ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
_lowerCamelCase : Dict = self.get_dummy_components()
_lowerCamelCase : Optional[Any] = self.pipeline_class(**_lowerCamelCase )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
_lowerCamelCase : Optional[Any] = self.get_dummy_inputs(_lowerCamelCase )
_lowerCamelCase : int = 2
_lowerCamelCase : int = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # schedulers without a sigma schedule are not supported here, skip them
continue
_lowerCamelCase : List[str] = getattr(_lowerCamelCase ,scheduler_enum.name )
_lowerCamelCase : List[str] = scheduler_cls.from_config(pipe.scheduler.config )
_lowerCamelCase : Optional[Any] = pipe(**_lowerCamelCase )[0]
outputs.append(_lowerCamelCase )
assert check_same_shape(_lowerCamelCase )
@require_torch_gpu
@slow
class A_ ( unittest.TestCase ):
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Tuple = torch.manual_seed(33 )
_lowerCamelCase : List[Any] = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" ,torch_dtype=torch.floataa )
pipe.to("cuda" )
_lowerCamelCase : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" ,torch_dtype=torch.floataa )
upscaler.to("cuda" )
_lowerCamelCase : List[str] = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
_lowerCamelCase : Union[str, Any] = pipe(_lowerCamelCase ,generator=_lowerCamelCase ,output_type="latent" ).images
_lowerCamelCase : Dict = upscaler(
prompt=_lowerCamelCase ,image=_lowerCamelCase ,num_inference_steps=20 ,guidance_scale=0 ,generator=_lowerCamelCase ,output_type="np" ,).images[0]
_lowerCamelCase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5e-2
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : int = torch.manual_seed(33 )
_lowerCamelCase : Tuple = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" ,torch_dtype=torch.floataa )
upscaler.to("cuda" )
_lowerCamelCase : str = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
_lowerCamelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
_lowerCamelCase : Dict = upscaler(
prompt=_lowerCamelCase ,image=_lowerCamelCase ,num_inference_steps=20 ,guidance_scale=0 ,generator=_lowerCamelCase ,output_type="np" ,).images[0]
_lowerCamelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5e-2 | 354 |
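# NOTE: hedged usage sketch of the two-stage flow exercised by the slow tests above: the base
# pipeline is asked for raw latents (output_type="latent") so the upscaler can consume them
# directly, without a decode/re-encode round trip through the VAE. Model ids and arguments
# mirror the tests; run on a CUDA device with enough memory.
import torch
from diffusers import StableDiffusionPipeline, StableDiffusionLatentUpscalePipeline
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda")
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
).to("cuda")
prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
low_res_latents = pipe(prompt, output_type="latent").images
image = upscaler(prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0).images[0]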
"""simple docstring"""
cache: dict[tuple[int, int, int], int] = {}
def _calculate(days: int, absent: int, late: int) -> int:
    '''simple docstring'''
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution(days: int = 30) -> int:
    '''simple docstring'''
    return _calculate(days, absent=0, late=0)
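# NOTE: quick sanity check for the memoised recursion above; Project Euler problem 191 states
# that a 4-day period admits exactly 43 prize strings out of the 3**4 = 81 possible
# attendance strings.
assert solution(4) == 43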
if __name__ == "__main__":
print(solution()) | 340 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path) -> None:
    '''simple docstring'''
    config = LxmertConfig.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
 parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
 args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 355 |
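# NOTE: illustrative invocation only. The paths below are placeholders, not shipped files,
# and the script name assumes the usual transformers naming convention for this converter:
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./lxmert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin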
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    '''simple docstring'''
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789" )
def solution() -> int | None:
    '''simple docstring'''
    for base_num in range(9999 , 4999 , -1 ):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333 , 99 , -1 ):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
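# NOTE: worked example for the search above. The known answer comes from the first loop:
# 9327 concatenated with 2 * 9327 = 18654 yields 932718654, and multiplying by 100002
# performs exactly that concatenation (n * 100000 + 2 * n).
assert 100002 * 9327 == int(str(9327) + str(2 * 9327)) == 932718654
assert is_9_pandigital(932718654)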
if __name__ == "__main__":
print(f'''{solution() = }''') | 340 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester :
def __init__( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[str]=12 ,__lowerCAmelCase: Dict=7 ,__lowerCAmelCase: int=True ,__lowerCAmelCase: Optional[int]=True ,__lowerCAmelCase: str=True ,__lowerCAmelCase: Optional[Any]=99 ,__lowerCAmelCase: Union[str, Any]=32 ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: Dict=2 ,__lowerCAmelCase: List[Any]=4 ,__lowerCAmelCase: List[Any]=37 ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: List[Any]=0.1 ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: List[Any]=0.02 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: Tuple=None ,):
'''simple docstring'''
_lowerCamelCase : str = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : int = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Tuple = use_input_mask
_lowerCamelCase : Union[str, Any] = use_labels
_lowerCamelCase : Optional[Any] = vocab_size
_lowerCamelCase : Optional[int] = hidden_size
_lowerCamelCase : List[str] = projection_dim
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : Any = dropout
_lowerCamelCase : Dict = attention_dropout
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Any = scope
_lowerCamelCase : List[Any] = bos_token_id
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_input_mask:
_lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_lowerCamelCase : int = input_mask.numpy()
_lowerCamelCase, _lowerCamelCase : str = input_mask.shape
_lowerCamelCase : str = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) )
for batch_idx, start_index in enumerate(__lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = 1
_lowerCamelCase : str = 0
_lowerCamelCase : List[Any] = self.get_config()
return config, input_ids, tf.convert_to_tensor(__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,)
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: Any ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = TFBlipTextModel(config=__lowerCAmelCase )
_lowerCamelCase : List[str] = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,training=__lowerCAmelCase )
_lowerCamelCase : Dict = model(__lowerCAmelCase ,training=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : str = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A_ ( TFModelTesterMixin , unittest.TestCase ):
lowerCAmelCase__ = (TFBlipTextModel,) if is_tf_available() else ()
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = BlipTextModelTester(self )
_lowerCamelCase : Dict = ConfigTester(self ,config_class=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
pass
def _lowercase ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def _lowercase ( self: int ):
'''simple docstring'''
pass
@slow
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Tuple = TFBlipTextModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def _lowercase ( self: Any ,__lowerCAmelCase: Any=True ):
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=__lowerCAmelCase ) | 356 |
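# NOTE: small illustration of the attention-mask construction in the tester above: a random
# split point is drawn per row, positions before it are attended (1) and positions from it on
# are masked (0), so every mask is a contiguous prefix of ones. Shapes and split points below
# are toy values, not the tester's.
import numpy as np
mask = np.ones((2, 6), dtype=np.int64)
for row, start in enumerate([4, 2]):
    mask[row, start:] = 0
print(mask)  # [[1 1 1 1 0 0], [1 1 0 0 0 0]]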
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType( ExplicitEnum ):
    CHARACTER = 'char'
    BPE = 'bpe'
    WORDPIECE = 'wp'
_lowerCAmelCase : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class A_ ( ProcessorMixin ):
lowerCAmelCase__ = ['image_processor', 'char_tokenizer']
lowerCAmelCase__ = 'ViTImageProcessor'
lowerCAmelCase__ = 'MgpstrTokenizer'
def __init__( self: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Optional[int]=None ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." ,__lowerCAmelCase ,)
_lowerCamelCase : List[Any] = kwargs.pop("feature_extractor" )
_lowerCamelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
_lowerCamelCase : List[str] = tokenizer
_lowerCamelCase : str = AutoTokenizer.from_pretrained("gpt2" )
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__lowerCAmelCase ,__lowerCAmelCase )
def __call__( self: Optional[int] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[Any]=None ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
_lowerCamelCase : Optional[int] = self.image_processor(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
if text is not None:
_lowerCamelCase : int = self.char_tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_lowerCamelCase : Tuple = encodings["input_ids"]
return inputs
def _lowercase ( self: int ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = sequences
_lowerCamelCase : Dict = char_preds.size(0 )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self._decode_helper(__lowerCAmelCase ,"char" )
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self._decode_helper(__lowerCAmelCase ,"bpe" )
_lowerCamelCase, _lowerCamelCase : Tuple = self._decode_helper(__lowerCAmelCase ,"wp" )
_lowerCamelCase : List[str] = []
_lowerCamelCase : str = []
for i in range(__lowerCAmelCase ):
_lowerCamelCase : str = [char_scores[i], bpe_scores[i], wp_scores[i]]
_lowerCamelCase : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]]
_lowerCamelCase : Optional[Any] = scores.index(max(__lowerCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = final_strs
_lowerCamelCase : int = final_scores
_lowerCamelCase : str = char_strs
_lowerCamelCase : Dict = bpe_strs
_lowerCamelCase : int = wp_strs
return out
def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
_lowerCamelCase : int = self.char_decode
_lowerCamelCase : List[str] = 1
_lowerCamelCase : Optional[int] = "[s]"
elif format == DecodeType.BPE:
_lowerCamelCase : Dict = self.bpe_decode
_lowerCamelCase : str = 2
_lowerCamelCase : Union[str, Any] = "#"
elif format == DecodeType.WORDPIECE:
_lowerCamelCase : int = self.wp_decode
_lowerCamelCase : List[str] = 102
_lowerCamelCase : List[Any] = "[SEP]"
else:
raise ValueError(F"""Format {format} is not supported.""" )
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = [], []
_lowerCamelCase : Any = pred_logits.size(0 )
_lowerCamelCase : int = pred_logits.size(1 )
_lowerCamelCase, _lowerCamelCase : List[Any] = pred_logits.topk(1 ,dim=-1 ,largest=__lowerCAmelCase ,sorted=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = preds_index.view(-1 ,__lowerCAmelCase )[:, 1:]
_lowerCamelCase : List[str] = decoder(__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : str = torch.nn.functional.softmax(__lowerCAmelCase ,dim=2 ).max(dim=2 )
_lowerCamelCase : Any = preds_max_prob[:, 1:]
for index in range(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = preds_str[index].find(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = preds_str[index][:pred_eos]
_lowerCamelCase : Optional[Any] = preds_index[index].cpu().tolist()
_lowerCamelCase : List[str] = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1
_lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1]
_lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__lowerCAmelCase )
conf_scores.append(__lowerCAmelCase )
return dec_strs, conf_scores
def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : str = [seq.replace(" " ,"" ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )]
return decode_strs
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__lowerCAmelCase )
def _lowercase ( self: Tuple ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = [seq.replace(" " ,"" ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )]
return decode_strs | 340 | 0 |
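# NOTE: minimal illustration of the best-of-three rule in the decode path above: for every
# sample the processor keeps the output of whichever head (character / BPE / wordpiece) has
# the highest confidence. Strings and scores below are made up.
char, bpe, wp = ("ticket", 0.98), ("tickei", 0.61), ("ticket", 0.95)
scores = [char[1], bpe[1], wp[1]]
candidates = [char[0], bpe[0], wp[0]]
best = scores.index(max(scores))
print(candidates[best], scores[best])  # ticket 0.98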
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
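# NOTE: a second quick check in the same adjacency-dict representation: a triangle (odd
# cycle) is the smallest non-bipartite graph, so the function should report False for it.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False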
print(check_bipartite_dfs(graph)) | 357 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_lowerCAmelCase : List[Any] = get_logger(__name__)
class A_ :
lowerCAmelCase__ = 'dummy_data'
lowerCAmelCase__ = 'datasets'
lowerCAmelCase__ = False
def __init__( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[Version, str] ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[List[Callable]] = None ,):
'''simple docstring'''
_lowerCamelCase : str = 0
_lowerCamelCase : List[str] = dataset_name
_lowerCamelCase : Optional[int] = cache_dir
_lowerCamelCase : Optional[int] = use_local_dummy_data
_lowerCamelCase : int = config
# download_callbacks take a single url as input
_lowerCamelCase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_lowerCamelCase : int = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_lowerCamelCase : Tuple = str(__lowerCAmelCase )
# to be downloaded
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Dict = None
@property
def _lowercase ( self: str ):
'''simple docstring'''
if self._dummy_file is None:
_lowerCamelCase : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def _lowercase ( self: str ):
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" ,self.config.name ,self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" ,self.version_name )
@property
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
return os.path.join(self.dummy_data_folder ,"dummy_data.zip" )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_lowerCamelCase : Optional[int] = cached_path(
__lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=__lowerCAmelCase ,force_extract=__lowerCAmelCase )
return os.path.join(__lowerCAmelCase ,self.dummy_file_name )
@property
def _lowercase ( self: Tuple ):
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file )
@property
def _lowercase ( self: List[str] ):
'''simple docstring'''
if self._bucket_url is None:
_lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) )
return self._bucket_url
@property
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ):
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_lowerCamelCase : Tuple = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_lowerCamelCase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,(list, tuple) ):
return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase )
else:
return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return self.download_and_extract(__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ):
'''simple docstring'''
return self.download_and_extract(__lowerCAmelCase )
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return path
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return {}
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : str = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
for single_url in single_urls:
download_callback(__lowerCAmelCase )
else:
_lowerCamelCase : Union[str, Any] = single_urls
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) for x in single_urls]
else:
_lowerCamelCase : Union[str, Any] = single_urls
_lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) )
_lowerCamelCase : List[Any] = value
# make sure that values are unique
if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_lowerCamelCase : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,__lowerCAmelCase ) ) for url in data_url )
_lowerCamelCase : Optional[Any] = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_lowerCamelCase : Tuple = [data_url[0]] * len(__lowerCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(__lowerCAmelCase )
return dummy_data_list
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(__lowerCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
pass
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
def _iter_archive_members(__lowerCAmelCase: Any ):
# this preserves the order of the members inside the ZIP archive
_lowerCamelCase : Tuple = Path(self.dummy_file ).parent
_lowerCamelCase : str = path.relative_to(__lowerCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_lowerCamelCase : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = Path(__lowerCAmelCase )
_lowerCamelCase : int = _iter_archive_members(__lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(__lowerCAmelCase ).as_posix(), file_path.open("rb" )
def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = [paths]
for path in paths:
if os.path.isfile(__lowerCAmelCase ):
if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__lowerCAmelCase ):
if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(__lowerCAmelCase ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(__lowerCAmelCase ,__lowerCAmelCase ) | 340 | 0 |
"""simple docstring"""
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    '''simple docstring'''
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
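# NOTE: an equivalent formulation of the search above, written as a readability sketch rather
# than an optimisation: each 13-digit window is scored with a reduce over its digits and the
# maximum is taken across all windows; it must agree with `solution()`.
from functools import reduce
def solution_windows(digits: str = N, window: int = 13) -> int:
    return max(
        reduce(lambda acc, d: acc * int(d), digits[i : i + window], 1)
        for i in range(len(digits) - window + 1)
    )
assert solution_windows() == solution()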
if __name__ == "__main__":
print(f'''{solution() = }''') | 358 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    '''simple docstring'''
    if not isinstance(precision , int ):
        raise TypeError("Undefined for non-integers" )
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers" )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
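# NOTE: the series implemented above is the Chudnovsky formula; each term contributes roughly
# 14 new correct digits, which is why num_iterations = ceil(precision / 14). In LaTeX:
#
#   \frac{1}{\pi} = 12 \sum_{k=0}^{\infty}
#       \frac{(-1)^k (6k)! (13591409 + 545140134 k)}{(3k)! (k!)^3 640320^{3k + 3/2}}
#
# The code folds the k-independent constant 640320^{3/2} / 12 = 426880 \sqrt{10005} into
# `constant_term` and accumulates (-640320^3)^k = (-262537412640768000)^k in `exponential_term`.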
if __name__ == "__main__":
 n = 50
print(f'''The first {n} digits of pi is: {pi(n)}''') | 340 | 0 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_lowerCAmelCase : int = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class A_ ( Pipeline ):
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
if self.framework == "tf":
_lowerCamelCase : str = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
_lowerCamelCase : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id ,as_tuple=__lowercase )
else:
raise ValueError("Unsupported framework" )
return masked_index
def _lowercase ( self: Dict ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.get_masked_index(__lowercase )
_lowerCamelCase : int = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" ,self.model.base_model_prefix ,F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" ,)
def _lowercase ( self: List[Any] ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
if isinstance(__lowercase ,__lowercase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__lowercase )
def _lowercase ( self: int ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[str, Any]=None ,**__lowerCAmelCase: List[Any] ):
'''simple docstring'''
if return_tensors is None:
_lowerCamelCase : Union[str, Any] = self.framework
_lowerCamelCase : Tuple = self.tokenizer(__lowercase ,return_tensors=__lowercase )
self.ensure_exactly_one_mask_token(__lowercase )
return model_inputs
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : str = self.model(**__lowercase )
_lowerCamelCase : Optional[Any] = model_inputs["input_ids"]
return model_outputs
def _lowercase ( self: List[str] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Any=5 ,__lowerCAmelCase: Optional[int]=None ):
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
_lowerCamelCase : Any = target_ids.shape[0]
_lowerCamelCase : Dict = model_outputs["input_ids"][0]
_lowerCamelCase : Optional[int] = model_outputs["logits"]
if self.framework == "tf":
_lowerCamelCase : Any = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
_lowerCamelCase : List[Any] = outputs.numpy()
_lowerCamelCase : Dict = outputs[0, masked_index, :]
_lowerCamelCase : Dict = stable_softmax(__lowercase ,axis=-1 )
if target_ids is not None:
_lowerCamelCase : Union[str, Any] = tf.gather_nd(tf.squeeze(__lowercase ,0 ) ,target_ids.reshape(-1 ,1 ) )
_lowerCamelCase : List[str] = tf.expand_dims(__lowercase ,0 )
_lowerCamelCase : Union[str, Any] = tf.math.top_k(__lowercase ,k=__lowercase )
_lowerCamelCase, _lowerCamelCase : List[Any] = topk.values.numpy(), topk.indices.numpy()
else:
_lowerCamelCase : str = torch.nonzero(input_ids == self.tokenizer.mask_token_id ,as_tuple=__lowercase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
_lowerCamelCase : Any = outputs[0, masked_index, :]
_lowerCamelCase : str = logits.softmax(dim=-1 )
if target_ids is not None:
_lowerCamelCase : List[Any] = probs[..., target_ids]
_lowerCamelCase, _lowerCamelCase : int = probs.topk(__lowercase )
_lowerCamelCase : List[Any] = []
_lowerCamelCase : Dict = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() ,predictions.tolist() ) ):
_lowerCamelCase : Tuple = []
for v, p in zip(_values ,_predictions ):
# Copy is important since we're going to modify this array in place
_lowerCamelCase : List[str] = input_ids.numpy().copy()
if target_ids is not None:
_lowerCamelCase : Dict = target_ids[p].tolist()
_lowerCamelCase : Any = p
# Filter padding out:
_lowerCamelCase : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
_lowerCamelCase : Optional[Any] = self.tokenizer.decode(__lowercase ,skip_special_tokens=__lowercase )
_lowerCamelCase : int = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(__lowercase )
result.append(__lowercase )
if single_mask:
return result[0]
return result
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[Any]=None ):
'''simple docstring'''
if isinstance(__lowercase ,__lowercase ):
_lowerCamelCase : List[str] = [targets]
try:
_lowerCamelCase : str = self.tokenizer.get_vocab()
except Exception:
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Tuple = []
for target in targets:
_lowerCamelCase : Optional[Any] = vocab.get(__lowercase ,__lowercase )
if id_ is None:
_lowerCamelCase : List[str] = self.tokenizer(
__lowercase ,add_special_tokens=__lowercase ,return_attention_mask=__lowercase ,return_token_type_ids=__lowercase ,max_length=1 ,truncation=__lowercase ,)["input_ids"]
if len(__lowercase ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
"We cannot replace it with anything meaningful, ignoring it" )
continue
_lowerCamelCase : List[Any] = input_ids[0]
            # XXX: if users end up on this code path, per-target tokenization
            # makes the lookup pretty slow, so the warning below lets them fix
            # their input to get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
_lowerCamelCase : Union[str, Any] = list(set(__lowercase ) )
if len(__lowercase ) == 0:
raise ValueError("At least one target must be provided when passed." )
_lowerCamelCase : Optional[int] = np.array(__lowercase )
return target_ids
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: Any=None ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = {}
if targets is not None:
_lowerCamelCase : str = self.get_target_ids(__lowercase ,__lowercase )
_lowerCamelCase : int = target_ids
if top_k is not None:
_lowerCamelCase : Tuple = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" ,self.model.base_model_prefix ,"The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__( self: Dict ,__lowerCAmelCase: List[Any] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Any = super().__call__(__lowercase ,**__lowercase )
if isinstance(__lowercase ,__lowercase ) and len(__lowercase ) == 1:
return outputs[0]
return outputs | 359 |
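# NOTE: hedged usage example for the pipeline class above, via the high-level factory. The
# model id and prompt are illustrative; the result keys ("score", "token", "token_str",
# "sequence") match the dicts assembled in the postprocessing step above.
from transformers import pipeline
unmasker = pipeline("fill-mask", model="distilroberta-base")
for pred in unmasker("The capital of France is <mask>.", top_k=3):
    print(pred["token_str"], round(pred["score"], 3))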
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput( BaseOutput ):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.9_9_9 , alpha_transform_type="cosine" , ) -> torch.Tensor:
    '''simple docstring'''
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -1_2.0 )
    else:
        raise ValueError(F"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
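# NOTE: for the default "cosine" branch this matches the Nichol & Dhariwal schedule: with
# alpha_bar(t) = cos(((t + 0.008) / 1.008) * pi / 2) ** 2, each discrete step uses
# beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at `max_beta` to avoid
# singularities near t = 1.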
class A_ ( SchedulerMixin , ConfigMixin ):
@register_to_config
def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,):
'''simple docstring'''
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
_lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = 1.0 - self.betas
_lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 )
_lowerCamelCase : int = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
_lowerCamelCase : Tuple = 1.0
# setable values
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() )
_lowerCamelCase : List[str] = variance_type
def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ):
'''simple docstring'''
return sample
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ):
'''simple docstring'''
_lowerCamelCase : str = num_inference_steps
_lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
_lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
_lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ):
'''simple docstring'''
if prev_timestep is None:
_lowerCamelCase : List[str] = t - 1
_lowerCamelCase : Optional[int] = self.alphas_cumprod[t]
_lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : Dict = 1 - alpha_prod_t
_lowerCamelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : List[Any] = self.betas[t]
else:
_lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta
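        # NOTE: the quantity above is the DDPM posterior variance, eq. (7) in Ho et al. 2020:
        # beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t,
        # computed here as beta_prod_t_prev / beta_prod_t * beta.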
if variance_type is None:
_lowerCamelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
_lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) )
_lowerCamelCase : str = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
_lowerCamelCase : str = variance.log()
_lowerCamelCase : str = beta.log()
_lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2
_lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : str = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
_lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 )
else:
_lowerCamelCase : List[Any] = None
# 1. compute alphas, betas
if prev_timestep is None:
_lowerCamelCase : List[Any] = t - 1
_lowerCamelCase : Dict = self.alphas_cumprod[t]
_lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : Dict = 1 - alpha_prod_t
_lowerCamelCase : List[str] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : Any = self.betas[t]
_lowerCamelCase : str = self.alphas[t]
else:
_lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev
_lowerCamelCase : Optional[Any] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_lowerCamelCase : List[Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_lowerCamelCase : Any = torch.clamp(
__lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
_lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_lowerCamelCase : Union[str, Any] = 0
if t > 0:
_lowerCamelCase : Dict = randn_tensor(
model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device )
_lowerCamelCase : Any = self._get_variance(
__lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,)
if self.variance_type == "fixed_small_log":
_lowerCamelCase : Optional[Any] = variance
elif self.variance_type == "learned_range":
_lowerCamelCase : Optional[int] = (0.5 * variance).exp()
else:
raise ValueError(
F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
" for the UnCLIPScheduler." )
_lowerCamelCase : Dict = variance * variance_noise
_lowerCamelCase : List[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase )
def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.IntTensor ,):
'''simple docstring'''
_lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype )
_lowerCamelCase : Any = timesteps.to(original_samples.device )
_lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5
_lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
_lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 )
_lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
_lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
_lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
_lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples | 340 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class A_ ( A_ ):
lowerCAmelCase__ = "open-llama"
def __init__( self: List[str] ,__lowerCAmelCase: Tuple=100_000 ,__lowerCAmelCase: Dict=4_096 ,__lowerCAmelCase: str=11_008 ,__lowerCAmelCase: int=32 ,__lowerCAmelCase: Union[str, Any]=32 ,__lowerCAmelCase: str="silu" ,__lowerCAmelCase: str=2_048 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: Any=1e-6 ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Union[str, Any]=0 ,__lowerCAmelCase: Dict=1 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: Optional[Any]=False ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: Optional[int]=0.1 ,__lowerCAmelCase: int=True ,__lowerCAmelCase: str=True ,__lowerCAmelCase: Optional[Any]=None ,**__lowerCAmelCase: Any ,):
'''simple docstring'''
_lowerCamelCase : int = vocab_size
_lowerCamelCase : Union[str, Any] = max_position_embeddings
_lowerCamelCase : str = hidden_size
_lowerCamelCase : Tuple = intermediate_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Optional[Any] = num_attention_heads
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Union[str, Any] = initializer_range
_lowerCamelCase : Tuple = rms_norm_eps
_lowerCamelCase : int = use_cache
_lowerCamelCase : Optional[int] = kwargs.pop(
"use_memorry_efficient_attention" ,snake_case__ )
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_dropout_prob
_lowerCamelCase : Any = use_stable_embedding
_lowerCamelCase : str = shared_input_output_embedding
_lowerCamelCase : Tuple = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=snake_case__ ,bos_token_id=snake_case__ ,eos_token_id=snake_case__ ,tie_word_embeddings=snake_case__ ,**snake_case__ ,)
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,snake_case__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
F"""got {self.rope_scaling}""" )
_lowerCamelCase : str = self.rope_scaling.get("type" ,snake_case__ )
_lowerCamelCase : Dict = self.rope_scaling.get("factor" ,snake_case__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(snake_case__ ,snake_case__ ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" ) | 360 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCAmelCase : str = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
for attribute in key.split("." ):
_lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
_lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
_lowerCamelCase : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
_lowerCamelCase : Tuple = value
elif weight_type == "weight_g":
_lowerCamelCase : List[str] = value
elif weight_type == "weight_v":
_lowerCamelCase : List[Any] = value
elif weight_type == "bias":
_lowerCamelCase : str = value
elif weight_type == "running_mean":
_lowerCamelCase : Optional[int] = value
elif weight_type == "running_var":
_lowerCamelCase : Optional[Any] = value
elif weight_type == "num_batches_tracked":
_lowerCamelCase : int = value
elif weight_type == "inv_freq":
_lowerCamelCase : List[str] = value
else:
_lowerCamelCase : Optional[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Dict = []
_lowerCamelCase : Optional[Any] = fairseq_model.state_dict()
_lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase : Dict = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
_lowerCamelCase : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_lowerCamelCase : int = True
if "*" in mapped_key:
_lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2]
_lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase )
if "pos_bias_u" in name:
_lowerCamelCase : int = None
elif "pos_bias_v" in name:
_lowerCamelCase : Any = None
elif "weight_g" in name:
_lowerCamelCase : Any = "weight_g"
elif "weight_v" in name:
_lowerCamelCase : Any = "weight_v"
elif "bias" in name:
_lowerCamelCase : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase : Dict = "weight"
elif "running_mean" in name:
_lowerCamelCase : str = "running_mean"
elif "inv_freq" in name:
_lowerCamelCase : List[Any] = "inv_freq"
elif "running_var" in name:
_lowerCamelCase : Tuple = "running_var"
elif "num_batches_tracked" in name:
_lowerCamelCase : str = "num_batches_tracked"
else:
_lowerCamelCase : Dict = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : int = full_name.split("conv_layers." )[-1]
_lowerCamelCase : List[Any] = name.split("." )
_lowerCamelCase : Union[str, Any] = int(items[0] )
_lowerCamelCase : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
_lowerCamelCase : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
_lowerCamelCase : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
_lowerCamelCase : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
_lowerCamelCase : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" )
else:
_lowerCamelCase : Dict = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
_lowerCamelCase : List[Any] = "rotary"
if is_finetuned:
if dict_path:
_lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase : Optional[int] = target_dict.pad_index
_lowerCamelCase : Dict = target_dict.bos_index
_lowerCamelCase : Optional[Any] = target_dict.eos_index
_lowerCamelCase : str = len(target_dict.symbols )
_lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
_lowerCamelCase : Tuple = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase : List[str] = 0
_lowerCamelCase : List[Any] = 1
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , )
_lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False
_lowerCamelCase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
_lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
_lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase )
else:
_lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase )
if is_finetuned:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" )
_lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
_lowerCamelCase : Dict = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_lowerCAmelCase : str = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 340 | 0 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
if not all(char in "01" for char in bin_string ):
raise ValueError("Non-binary value was passed to the function" )
if not bin_string:
raise ValueError("Empty string was passed to the function" )
_lowerCamelCase : List[str] = ""
while len(lowerCAmelCase__ ) % 3 != 0:
_lowerCamelCase : str = "0" + bin_string
_lowerCamelCase : int = [
bin_string[index : index + 3]
for index in range(len(lowerCAmelCase__ ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
_lowerCamelCase : Union[str, Any] = 0
for index, val in enumerate(lowerCAmelCase__ ):
oct_val += int(2 ** (2 - index) * int(lowerCAmelCase__ ) )
oct_string += str(lowerCAmelCase__ )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 361 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float:
'''simple docstring'''
_validate_point(_lowerCamelCase )
_validate_point(_lowerCamelCase )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(_lowerCamelCase , _lowerCamelCase ) ) )
def lowerCamelCase_( _lowerCamelCase ) -> None:
'''simple docstring'''
if point:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
for item in point:
if not isinstance(_lowerCamelCase , (int, float) ):
_lowerCamelCase : Dict = (
"Expected a list of numbers as input, found "
F"""{type(_lowerCamelCase ).__name__}"""
)
raise TypeError(_lowerCamelCase )
else:
_lowerCamelCase : Optional[int] = F"""Expected a list of numbers as input, found {type(_lowerCamelCase ).__name__}"""
raise TypeError(_lowerCamelCase )
else:
raise ValueError("Missing an input" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float:
'''simple docstring'''
_validate_point(_lowerCamelCase )
_validate_point(_lowerCamelCase )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(_lowerCamelCase , _lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 340 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
@property
def _lowercase ( self: int ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("DownBlock2D", "AttnDownBlock2D") ,up_block_types=("AttnUpBlock2D", "UpBlock2D") ,)
return model
@property
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : List[Any] = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=3 ,)
return model
@property
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
return CLIPTextModel(__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : str = self.dummy_uncond_unet
_lowerCamelCase : Tuple = DDIMScheduler()
_lowerCamelCase : Union[str, Any] = self.dummy_vq_model
_lowerCamelCase : Optional[int] = LDMPipeline(unet=__lowerCAmelCase ,vqvae=__lowerCAmelCase ,scheduler=__lowerCAmelCase )
ldm.to(__lowerCAmelCase )
ldm.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : List[Any] = torch.manual_seed(0 )
_lowerCamelCase : Any = ldm(generator=__lowerCAmelCase ,num_inference_steps=2 ,output_type="numpy" ).images
_lowerCamelCase : str = torch.manual_seed(0 )
_lowerCamelCase : int = ldm(generator=__lowerCAmelCase ,num_inference_steps=2 ,output_type="numpy" ,return_dict=__lowerCAmelCase )[0]
_lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
_lowerCamelCase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCamelCase : List[Any] = np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72] )
_lowerCamelCase : Dict = 1e-2 if torch_device != 'mps' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Tuple = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
ldm.to(__lowerCAmelCase )
ldm.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : int = torch.manual_seed(0 )
_lowerCamelCase : Any = ldm(generator=__lowerCAmelCase ,num_inference_steps=5 ,output_type="numpy" ).images
_lowerCamelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowerCamelCase : Optional[Any] = np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47] )
_lowerCamelCase : Union[str, Any] = 1e-2 if torch_device != 'mps' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 362 |
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = np.inf
def set_batch_size(_lowerCamelCase ) -> None:
nonlocal batch_size
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Optional[int] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(_lowerCamelCase , _lowerCamelCase ) and feature.dtype == "binary":
_lowerCamelCase : List[str] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(_lowerCamelCase , _lowerCamelCase )
return None if batch_size is np.inf else batch_size
class A_ ( _a ):
def __init__( self: Optional[int] ,__lowerCAmelCase: NestedDataStructureLike[PathLike] ,__lowerCAmelCase: Optional[NamedSplit] = None ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: int ,):
'''simple docstring'''
super().__init__(
__lowerCAmelCase ,split=__lowerCAmelCase ,features=__lowerCAmelCase ,cache_dir=__lowerCAmelCase ,keep_in_memory=__lowerCAmelCase ,streaming=__lowerCAmelCase ,num_proc=__lowerCAmelCase ,**__lowerCAmelCase ,)
_lowerCamelCase : Tuple = path_or_paths if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else {self.split: path_or_paths}
_lowerCamelCase : Any = _PACKAGED_DATASETS_MODULES["parquet"][1]
_lowerCamelCase : int = Parquet(
cache_dir=__lowerCAmelCase ,data_files=__lowerCAmelCase ,features=__lowerCAmelCase ,hash=__lowerCAmelCase ,**__lowerCAmelCase ,)
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
if self.streaming:
_lowerCamelCase : List[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_lowerCamelCase : Tuple = None
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : str = None
self.builder.download_and_prepare(
download_config=__lowerCAmelCase ,download_mode=__lowerCAmelCase ,verification_mode=__lowerCAmelCase ,base_path=__lowerCAmelCase ,num_proc=self.num_proc ,)
_lowerCamelCase : Any = self.builder.as_dataset(
split=self.split ,verification_mode=__lowerCAmelCase ,in_memory=self.keep_in_memory )
return dataset
class A_ :
def __init__( self: str ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: Union[PathLike, BinaryIO] ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: List[Any] ,):
'''simple docstring'''
_lowerCamelCase : Any = dataset
_lowerCamelCase : Any = path_or_buf
_lowerCamelCase : Any = batch_size or get_writer_batch_size(dataset.features )
_lowerCamelCase : List[str] = parquet_writer_kwargs
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with open(self.path_or_buf ,"wb+" ) as buffer:
_lowerCamelCase : str = self._write(file_obj=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs )
else:
_lowerCamelCase : Optional[int] = self._write(file_obj=self.path_or_buf ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs )
return written
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: BinaryIO ,__lowerCAmelCase: int ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Optional[int] = parquet_writer_kwargs.pop("path_or_buf" ,__lowerCAmelCase )
_lowerCamelCase : List[str] = self.dataset.features.arrow_schema
_lowerCamelCase : str = pq.ParquetWriter(__lowerCAmelCase ,schema=__lowerCAmelCase ,**__lowerCAmelCase )
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,__lowerCAmelCase ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating parquet from Arrow format" ,):
_lowerCamelCase : List[str] = query_table(
table=self.dataset._data ,key=slice(__lowerCAmelCase ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,)
writer.write_table(__lowerCAmelCase )
written += batch.nbytes
writer.close()
return written | 340 | 0 |
"""simple docstring"""
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
_lowerCAmelCase : List[Any] = Mapping[str, np.ndarray]
_lowerCAmelCase : Optional[int] = Mapping[str, Any] # Is a nested dict.
_lowerCAmelCase : Tuple = 0.01
@dataclasses.dataclass(frozen=__lowercase )
class A_ :
lowerCAmelCase__ = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
lowerCAmelCase__ = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
lowerCAmelCase__ = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
lowerCAmelCase__ = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
lowerCAmelCase__ = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
lowerCAmelCase__ = None
# Optional remark about the protein. Included as a comment in output PDB
# files
lowerCAmelCase__ = None
# Templates used to generate this protein (prediction-only)
lowerCAmelCase__ = None
# Chain corresponding to each parent
lowerCAmelCase__ = None
def lowerCamelCase_( _lowerCamelCase ) -> Protein:
'''simple docstring'''
_lowerCamelCase : List[str] = R"""(\[[A-Z]+\]\n)"""
_lowerCamelCase : List[str] = [tag.strip() for tag in re.split(_lowerCamelCase , _lowerCamelCase ) if len(_lowerCamelCase ) > 0]
_lowerCamelCase : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
_lowerCamelCase : List[str] = ["N", "CA", "C"]
_lowerCamelCase : Dict = None
_lowerCamelCase : Any = None
_lowerCamelCase : Optional[int] = None
for g in groups:
if "[PRIMARY]" == g[0]:
_lowerCamelCase : Union[str, Any] = g[1][0].strip()
for i in range(len(_lowerCamelCase ) ):
if seq[i] not in residue_constants.restypes:
_lowerCamelCase : Any = """X""" # FIXME: strings are immutable
_lowerCamelCase : int = np.array(
[residue_constants.restype_order.get(_lowerCamelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
_lowerCamelCase : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(_lowerCamelCase , g[1][axis].split() ) ) )
_lowerCamelCase : int = np.array(_lowerCamelCase )
_lowerCamelCase : Optional[int] = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(_lowerCamelCase ):
_lowerCamelCase : Union[str, Any] = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
_lowerCamelCase : List[Any] = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
_lowerCamelCase : Tuple = np.zeros(
(
len(_lowerCamelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(_lowerCamelCase ):
_lowerCamelCase : List[str] = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=_lowerCamelCase , atom_mask=_lowerCamelCase , aatype=_lowerCamelCase , residue_index=np.arange(len(_lowerCamelCase ) ) , b_factors=_lowerCamelCase , )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = 0 ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : List[str] = []
_lowerCamelCase : Optional[int] = prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
_lowerCamelCase : List[str] = prot.parents
_lowerCamelCase : List[str] = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
_lowerCamelCase : str = [p for i, p in zip(_lowerCamelCase , _lowerCamelCase ) if i == chain_id]
if parents is None or len(_lowerCamelCase ) == 0:
_lowerCamelCase : Optional[int] = ["""N/A"""]
pdb_headers.append(F"""PARENT {' '.join(_lowerCamelCase )}""" )
return pdb_headers
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : List[str] = []
_lowerCamelCase : Dict = pdb_str.split("\n" )
_lowerCamelCase : Dict = prot.remark
if remark is not None:
out_pdb_lines.append(F"""REMARK {remark}""" )
_lowerCamelCase : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
_lowerCamelCase : Dict = []
if prot.parents_chain_index is not None:
_lowerCamelCase : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(_lowerCamelCase ) , [] )
parent_dict[str(_lowerCamelCase )].append(_lowerCamelCase )
_lowerCamelCase : Optional[Any] = max([int(_lowerCamelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
_lowerCamelCase : Any = parent_dict.get(str(_lowerCamelCase ) , ["N/A"] )
parents_per_chain.append(_lowerCamelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
_lowerCamelCase : str = [["""N/A"""]]
def make_parent_line(_lowerCamelCase ) -> str:
return F"""PARENT {' '.join(_lowerCamelCase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
_lowerCamelCase : str = 0
for i, l in enumerate(_lowerCamelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(_lowerCamelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(_lowerCamelCase ):
_lowerCamelCase : Any = parents_per_chain[chain_counter]
else:
_lowerCamelCase : Any = ["""N/A"""]
out_pdb_lines.append(make_parent_line(_lowerCamelCase ) )
return "\n".join(_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : Tuple = residue_constants.restypes + ["""X"""]
def res_atoa(_lowerCamelCase ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
_lowerCamelCase : int = residue_constants.atom_types
_lowerCamelCase : List[str] = []
_lowerCamelCase : str = prot.atom_mask
_lowerCamelCase : int = prot.aatype
_lowerCamelCase : Optional[Any] = prot.atom_positions
_lowerCamelCase : int = prot.residue_index.astype(np.intaa )
_lowerCamelCase : List[str] = prot.b_factors
_lowerCamelCase : str = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
_lowerCamelCase : List[Any] = get_pdb_headers(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
pdb_lines.extend(_lowerCamelCase )
_lowerCamelCase : Any = aatype.shape[0]
_lowerCamelCase : str = 1
_lowerCamelCase : Any = 0
_lowerCamelCase : List[Any] = string.ascii_uppercase
_lowerCamelCase : Union[str, Any] = None
# Add all atom sites.
for i in range(_lowerCamelCase ):
_lowerCamelCase : Optional[Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(_lowerCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
_lowerCamelCase : List[str] = """ATOM"""
_lowerCamelCase : Dict = atom_name if len(_lowerCamelCase ) == 4 else F""" {atom_name}"""
_lowerCamelCase : int = """"""
_lowerCamelCase : List[Any] = """"""
_lowerCamelCase : Optional[Any] = 1.0_0
_lowerCamelCase : str = atom_name[0] # Protein supports only C, N, O, S, this works.
_lowerCamelCase : Tuple = """"""
_lowerCamelCase : List[str] = """A"""
if chain_index is not None:
_lowerCamelCase : Union[str, Any] = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
_lowerCamelCase : Optional[Any] = (
F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
F"""{res_name_a:>3} {chain_tag:>1}"""
F"""{residue_index[i]:>4}{insertion_code:>1} """
F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
F"""{occupancy:>6.2f}{b_factor:>6.2f} """
F"""{element:>2}{charge:>2}"""
)
pdb_lines.append(_lowerCamelCase )
atom_index += 1
_lowerCamelCase : str = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = chain_index[i + 1]
if should_terminate:
# Close the chain.
_lowerCamelCase : Optional[Any] = """TER"""
_lowerCamelCase : Optional[Any] = (
F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(_lowerCamelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(_lowerCamelCase , _lowerCamelCase ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> np.ndarray:
'''simple docstring'''
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , ) -> Protein:
'''simple docstring'''
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=_lowerCamelCase , remark=_lowerCamelCase , parents=_lowerCamelCase , parents_chain_index=_lowerCamelCase , ) | 363 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowerCamelCase_( _lowerCamelCase ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Optional[int] = tokenizer(example["content"] , truncation=_lowerCamelCase )["input_ids"]
_lowerCamelCase : Dict = len(example["content"] ) / len(output["input_ids"] )
return output
_lowerCAmelCase : Tuple = HfArgumentParser(PretokenizationArguments)
_lowerCAmelCase : Optional[int] = parser.parse_args()
if args.num_workers is None:
_lowerCAmelCase : Any = multiprocessing.cpu_count()
_lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
_lowerCAmelCase : Union[str, Any] = time.time()
_lowerCAmelCase : Optional[int] = load_dataset(args.dataset_name, split='''train''')
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
_lowerCAmelCase : Any = time.time()
_lowerCAmelCase : Dict = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
_lowerCAmelCase : str = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''') | 340 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
_lowerCAmelCase : str = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
_lowerCAmelCase : Optional[int] = {
'''RUCAIBox/mvp''': 1024,
}
class A_ ( a_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase__ = MvpTokenizer
def __init__( self: Any ,__lowerCAmelCase: Any=None ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Optional[Any]="replace" ,__lowerCAmelCase: str="<s>" ,__lowerCAmelCase: List[Any]="</s>" ,__lowerCAmelCase: Union[str, Any]="</s>" ,__lowerCAmelCase: Any="<s>" ,__lowerCAmelCase: Union[str, Any]="<unk>" ,__lowerCAmelCase: Any="<pad>" ,__lowerCAmelCase: int="<mask>" ,__lowerCAmelCase: str=False ,__lowerCAmelCase: List[Any]=True ,**__lowerCAmelCase: int ,):
'''simple docstring'''
super().__init__(
lowercase_ ,lowercase_ ,tokenizer_file=lowercase_ ,errors=lowercase_ ,bos_token=lowercase_ ,eos_token=lowercase_ ,sep_token=lowercase_ ,cls_token=lowercase_ ,unk_token=lowercase_ ,pad_token=lowercase_ ,mask_token=lowercase_ ,add_prefix_space=lowercase_ ,trim_offsets=lowercase_ ,**lowercase_ ,)
_lowerCamelCase : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" ,lowercase_ ) != add_prefix_space:
_lowerCamelCase : int = getattr(lowercase_ ,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = pre_tok_class(**lowercase_ )
_lowerCamelCase : List[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : str = "post_processor"
_lowerCamelCase : List[Any] = getattr(self.backend_tokenizer ,lowercase_ ,lowercase_ )
if tokenizer_component_instance:
_lowerCamelCase : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Dict = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space" ,lowercase_ ) != add_prefix_space:
_lowerCamelCase : Union[str, Any] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets" ,lowercase_ ) != trim_offsets:
_lowerCamelCase : Optional[Any] = trim_offsets
_lowerCamelCase : Optional[int] = True
if changes_to_apply:
_lowerCamelCase : int = getattr(lowercase_ ,state.pop("type" ) )
_lowerCamelCase : Any = component_class(**lowercase_ )
setattr(self.backend_tokenizer ,lowercase_ ,lowercase_ )
@property
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : Any = AddedToken(lowercase_ ,lstrip=lowercase_ ,rstrip=lowercase_ ) if isinstance(lowercase_ ,lowercase_ ) else value
_lowerCamelCase : Any = value
def _lowercase ( self: Union[str, Any] ,*__lowerCAmelCase: Optional[int] ,**__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : List[Any] = kwargs.get("is_split_into_words" ,lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*lowercase_ ,**lowercase_ )
def _lowercase ( self: List[Any] ,*__lowerCAmelCase: Optional[Any] ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = kwargs.get("is_split_into_words" ,lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._encode_plus(*lowercase_ ,**lowercase_ )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: int = None ):
'''simple docstring'''
_lowerCamelCase : Tuple = self._tokenizer.model.save(lowercase_ ,name=lowercase_ )
return tuple(lowercase_ )
def _lowercase ( self: int ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Optional[Any]=None ):
'''simple docstring'''
_lowerCamelCase : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowercase ( self: int ,__lowerCAmelCase: str ,__lowerCAmelCase: Dict = None ):
'''simple docstring'''
_lowerCamelCase : Dict = [self.sep_token_id]
_lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 364 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Optional[Any] = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 340 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class A_ ( __lowercase ):
lowerCAmelCase__ = "sew-d"
def __init__( self: Dict ,__lowerCAmelCase: List[str]=32 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Tuple=12 ,__lowerCAmelCase: Any=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: Any=2 ,__lowerCAmelCase: Optional[Any]=512 ,__lowerCAmelCase: int=256 ,__lowerCAmelCase: Any=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[int]=("p2c", "c2p") ,__lowerCAmelCase: Any="layer_norm" ,__lowerCAmelCase: Optional[Any]="gelu_python" ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[Any]=0.0 ,__lowerCAmelCase: Optional[int]=0.1 ,__lowerCAmelCase: Dict=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-7 ,__lowerCAmelCase: Any=1e-5 ,__lowerCAmelCase: Tuple="group" ,__lowerCAmelCase: List[str]="gelu" ,__lowerCAmelCase: int=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,__lowerCAmelCase: Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,__lowerCAmelCase: Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,__lowerCAmelCase: List[Any]=False ,__lowerCAmelCase: List[str]=128 ,__lowerCAmelCase: Dict=16 ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: str=0.05 ,__lowerCAmelCase: Optional[int]=10 ,__lowerCAmelCase: Any=2 ,__lowerCAmelCase: List[Any]=0.0 ,__lowerCAmelCase: List[str]=10 ,__lowerCAmelCase: Tuple=0 ,__lowerCAmelCase: Tuple="mean" ,__lowerCAmelCase: Any=False ,__lowerCAmelCase: Any=False ,__lowerCAmelCase: Optional[Any]=256 ,__lowerCAmelCase: int=0 ,__lowerCAmelCase: Tuple=1 ,__lowerCAmelCase: Optional[int]=2 ,**__lowerCAmelCase: Dict ,):
'''simple docstring'''
super().__init__(**_a ,pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a )
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : List[str] = feat_extract_norm
_lowerCamelCase : List[str] = feat_extract_activation
_lowerCamelCase : Tuple = list(_a )
_lowerCamelCase : List[Any] = list(_a )
_lowerCamelCase : List[str] = list(_a )
_lowerCamelCase : Tuple = conv_bias
_lowerCamelCase : str = num_conv_pos_embeddings
_lowerCamelCase : int = num_conv_pos_embedding_groups
_lowerCamelCase : List[Any] = len(self.conv_dim )
_lowerCamelCase : Optional[int] = num_hidden_layers
_lowerCamelCase : Optional[int] = intermediate_size
_lowerCamelCase : int = squeeze_factor
_lowerCamelCase : Optional[Any] = max_position_embeddings
_lowerCamelCase : Any = position_buckets
_lowerCamelCase : List[str] = share_att_key
_lowerCamelCase : Optional[int] = relative_attention
_lowerCamelCase : str = norm_rel_ebd
_lowerCamelCase : List[Any] = list(_a )
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : str = hidden_dropout
_lowerCamelCase : List[Any] = attention_dropout
_lowerCamelCase : int = activation_dropout
_lowerCamelCase : int = feat_proj_dropout
_lowerCamelCase : Any = final_dropout
_lowerCamelCase : Any = layer_norm_eps
_lowerCamelCase : int = feature_layer_norm_eps
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Tuple = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Optional[Any] = apply_spec_augment
_lowerCamelCase : Optional[int] = mask_time_prob
_lowerCamelCase : Tuple = mask_time_length
_lowerCamelCase : str = mask_time_min_masks
_lowerCamelCase : Union[str, Any] = mask_feature_prob
_lowerCamelCase : Any = mask_feature_length
_lowerCamelCase : Optional[int] = mask_feature_min_masks
# ctc loss
_lowerCamelCase : Tuple = ctc_loss_reduction
_lowerCamelCase : Optional[int] = ctc_zero_infinity
# sequence classification
_lowerCamelCase : Optional[Any] = use_weighted_layer_sum
_lowerCamelCase : Any = classifier_proj_size
@property
def _lowercase ( self: List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul ,self.conv_stride ,1 ) | 365 |
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
_lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
class A_ ( _a ):
lowerCAmelCase__ = 'masked_bert'
def __init__( self: Union[str, Any] ,__lowerCAmelCase: Dict=30_522 ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Dict=12 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: List[Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: str=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-12 ,__lowerCAmelCase: Union[str, Any]=0 ,__lowerCAmelCase: List[Any]="topK" ,__lowerCAmelCase: Optional[Any]="constant" ,__lowerCAmelCase: Optional[Any]=0.0 ,**__lowerCAmelCase: str ,):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : str = max_position_embeddings
_lowerCamelCase : List[str] = type_vocab_size
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : List[Any] = layer_norm_eps
_lowerCamelCase : int = pruning_method
_lowerCamelCase : str = mask_init
_lowerCamelCase : List[Any] = mask_scale | 340 | 0 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
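    # The class attributes above are the hooks consumed by TokenizerTesterMixin,
    # which also supplies `self.tokenizers_list` used by the slow/fast comparison tests below.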
    def setUp( self ):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
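    # Worked example for the toy BPE model above: with the merges "l o" -> "lo" and
    # "e r</w>" -> "er</w>", the word "lower" splits into
    # ["l", "o", "w", "e", "r</w>"] -> ["lo", "w", "e", "r</w>"] -> ["lo", "w", "er</w>"],
    # which is exactly the sequence test_full_tokenizer asserts below.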
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
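    # Both helpers load from `self.tmpdirname`, i.e. the toy vocab/merges files written
    # in setUp, so the slow and fast tokenizers are built from identical on-disk data.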
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
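    # Sanity check on the expected ids: in the vocab list from setUp,
    # "lo" -> 10, "w" -> 2, "er</w>" -> 16, "n" -> 9, "e" -> 3 and "<unk>" -> 20,
    # which yields exactly [10, 2, 16, 9, 3, 2, 16, 20] for the tokens plus the unk token.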
@require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)
# Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
# Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Loading a tokenizer saved in the pre-v4.17.0 format must raise a clear error.
        with self.assertRaises(BaseException) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."))
    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case(self):
        # CLIP always lowercases letters, so the common added-tokens casing test does not apply.
        pass | 366 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_lowerCAmelCase : str = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values below `vocab_size`."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output
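# Example (illustrative, not part of the original file): ids_tensor((2, 5), vocab_size=100)
# returns a 2x5 int32 array of ids drawn uniformly from [0, 99]; pass
# rng=random.Random(0) when a reproducible test input is needed.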
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = self._get_input_ids_and_config()
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase )
_lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Dict = jit(model.generate )
_lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config()
_lowerCamelCase : List[Any] = True
_lowerCamelCase : Optional[int] = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Dict = jit(model.generate )
_lowerCamelCase : int = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config()
_lowerCamelCase : int = False
_lowerCamelCase : Optional[Any] = max_length
_lowerCamelCase : Dict = 2
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
_lowerCamelCase : Dict = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Tuple = jit(model.generate )
_lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = self._get_input_ids_and_config()
_lowerCamelCase : Tuple = False
_lowerCamelCase : Union[str, Any] = max_length
_lowerCamelCase : List[str] = 2
_lowerCamelCase : Optional[int] = 2
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : str = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config()
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = max_length
_lowerCamelCase : Optional[Any] = 0.8
_lowerCamelCase : Union[str, Any] = 10
_lowerCamelCase : List[str] = 0.3
_lowerCamelCase : Tuple = 1
_lowerCamelCase : Any = 8
_lowerCamelCase : str = 9
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase )
_lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : int = jit(model.generate )
_lowerCamelCase : Optional[int] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = self._get_input_ids_and_config()
_lowerCamelCase : List[str] = max_length
_lowerCamelCase : Tuple = 1
_lowerCamelCase : Any = 8
_lowerCamelCase : Dict = 9
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Any = model_class(__lowerCAmelCase )
_lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Any = jit(model.generate )
_lowerCamelCase : Any = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config()
_lowerCamelCase : Dict = max_length
_lowerCamelCase : List[Any] = 2
_lowerCamelCase : Tuple = 1
_lowerCamelCase : List[str] = 8
_lowerCamelCase : List[Any] = 9
for model_class in self.all_generative_model_classes:
_lowerCamelCase : int = model_class(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Tuple = jit(model.generate )
_lowerCamelCase : Optional[Any] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCamelCase : Tuple = attention_mask.at[(0, 0)].set(0 )
_lowerCamelCase : Dict = False
_lowerCamelCase : Any = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Any = jit(model.generate )
_lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCamelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 )
_lowerCamelCase : List[str] = True
_lowerCamelCase : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Any = jit(model.generate )
_lowerCamelCase : List[Any] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCamelCase : List[str] = attention_mask.at[(0, 0)].set(0 )
_lowerCamelCase : int = 2
_lowerCamelCase : int = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : int = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Dict = jit(model.generate )
_lowerCamelCase : Dict = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
@require_flax
class A_ ( unittest.TestCase ):
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
_lowerCamelCase : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
_lowerCamelCase : Optional[Any] = "Hello world"
_lowerCamelCase : str = tokenizer(__lowerCAmelCase ,return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__lowerCAmelCase ,"do_samples" ):
model.generate(__lowerCAmelCase ,do_samples=__lowerCAmelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__lowerCAmelCase ,"foo" ):
_lowerCamelCase : List[str] = {"foo": "bar"}
model.generate(__lowerCAmelCase ,**__lowerCAmelCase ) | 340 | 0 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default level.
    Otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, configuring the library root logger if needed."""
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current verbosity level of the library root logger as an int."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level for the library root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)
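# Usage sketch (added; assumes this module is exposed as `transformers.utils.logging`):
#
#   from transformers.utils import logging
#
#   logging.set_verbosity_info()
#   logger = logging.get_logger("transformers")
#   logger.info("INFO")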
def disable_default_handler() -> None:
    """Disable the default handler of the library root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the library root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the library root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove the given handler from the library root logger."""
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Enable explicit formatting for every library logger's handler."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset the formatting for the library loggers."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """
    This method is identical to `logger.warning()`, but if the env var TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set,
    the warning is not printed.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """
    This method is identical to `logger.warning()`, but will emit the warning with the same message only once.
    """
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars() | 367 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
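# Usage sketch (added, illustrative): the 0.75-depth, 192px variant in the
# archive map above would be configured as
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)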
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4 | 340 | 0 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first `n` natural numbers (Project Euler problem 6)."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
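# Cross-check (added; not part of the original solution): an O(n) brute-force
# version of the same quantity. For n = 10 the sum of the squares is 385 and
# the square of the sum is 3025, so both functions return 2640.
def solution_brute_force(n: int = 100) -> int:
    # Compare the square of the sum against the sum of the squares directly.
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))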
if __name__ == "__main__":
print(f'''{solution() = }''') | 368 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module) -> None:
    """Disable gradient updates for every parameter of `module`."""
    # NOTE: the original function names were lost in this dump; `freeze_params`,
    # `get_device`, `show_image` and `get_timestamp` below are assumed names.
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Pick the best available torch device, warning about flaky MPS support."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations.")
    return device
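# Usage sketch (added; relies on the assumed function names introduced above):
#
#   model = torch.nn.Linear(4, 2)
#   freeze_params(model)       # no parameter will receive gradients
#   model.to(get_device())     # CUDA if available, else MPS/CPU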
def show_image(image) -> None:
    """Render `image` with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current time as an HH:MM:SS string."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp | 340 | 0 |
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14)
    restype_atom37_to_atom14.append([0] * 37)
    restype_atom14_mask.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37, dtype=torch.int32, device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14, dtype=torch.int32, device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask, dtype=torch.float32, device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein
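# Background note (added for clarity): atom37 is a residue-agnostic layout with
# one fixed slot per heavy-atom type, while atom14 packs each residue's atoms
# densely. The index tensors built above gather coordinates between the two
# layouts, and the *_atom_exists masks flag which slots are real atoms.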
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out | 369 |
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings, appending any leftover tail."""
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    max_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(max_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''') | 340 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Dummy stand-in so annotations still resolve when vision is unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : Dict = pipeline("visual-question-answering" ,model="hf-internal-testing/tiny-vilt-random-vqa" )
_lowerCamelCase : Any = [
{
"""image""": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = vqa_pipeline(_lowerCAmelCase ,top_k=1 )
self.assertEqual(
_lowerCAmelCase ,[
[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}],
[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}],
] ,)
@require_torch
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict = pipeline("visual-question-answering" ,model="hf-internal-testing/tiny-vilt-random-vqa" )
_lowerCamelCase : int = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_lowerCamelCase : str = """How many cats are there?"""
_lowerCamelCase : Tuple = vqa_pipeline(image=_lowerCAmelCase ,question="How many cats are there?" ,top_k=2 )
self.assertEqual(
_lowerCAmelCase ,[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}, {"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}] )
_lowerCamelCase : Any = vqa_pipeline({"image": image, "question": question} ,top_k=2 )
self.assertEqual(
_lowerCAmelCase ,[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}, {"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}] )
@slow
@require_torch
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Dict = pipeline("visual-question-answering" ,model="dandelin/vilt-b32-finetuned-vqa" )
_lowerCamelCase : List[Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_lowerCamelCase : int = """How many cats are there?"""
_lowerCamelCase : int = vqa_pipeline(image=_lowerCAmelCase ,question=_lowerCAmelCase ,top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ,decimals=4 ) ,[{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
_lowerCamelCase : Union[str, Any] = vqa_pipeline({"image": image, "question": question} ,top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ,decimals=4 ) ,[{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
_lowerCamelCase : List[str] = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] ,top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ,decimals=4 ) ,[[{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}]] * 2 ,)
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def _lowercase ( self: Any ):
'''simple docstring'''
pass | 370 |
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    """Return True if `t` is reachable from `s` in the residual graph,
    recording the BFS tree in `parent`."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Run Edmonds-Karp and return the edges saturated by the maximum flow."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
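# Sanity note (added): bfs/mincut above implement Edmonds-Karp, so the flow
# loop runs in O(V * E^2). The returned pairs are the edges saturated by the
# maximum flow; for `test_graph` they should come out as (1, 3), (4, 3) and
# (4, 5), whose original capacities (12 + 7 + 4) equal the max flow of 23.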
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5)) | 340 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
(
_lowerCamelCase
) : Any = self.prepare_config_and_inputs()
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowercase ( self: str ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = TFEsmModel(config=lowercase_ )
_lowerCamelCase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
_lowerCamelCase : List[Any] = model(lowercase_ )
_lowerCamelCase : Tuple = [input_ids, input_mask]
_lowerCamelCase : Union[str, Any] = model(lowercase_ )
_lowerCamelCase : Optional[int] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: int ,__lowerCAmelCase: Any ,__lowerCAmelCase: str ,__lowerCAmelCase: Any ,__lowerCAmelCase: str ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Union[str, Any] ,):
'''simple docstring'''
_lowerCamelCase : Tuple = True
_lowerCamelCase : int = TFEsmModel(config=lowercase_ )
_lowerCamelCase : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
_lowerCamelCase : str = model(lowercase_ )
_lowerCamelCase : Union[str, Any] = [input_ids, input_mask]
_lowerCamelCase : Optional[Any] = model(lowercase_ ,encoder_hidden_states=lowercase_ )
# Also check the case where encoder outputs are not passed
_lowerCamelCase : str = model(lowercase_ ,attention_mask=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: int ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: str ,__lowerCAmelCase: Any ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = TFEsmForMaskedLM(config=lowercase_ )
_lowerCamelCase : Optional[int] = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.num_labels
_lowerCamelCase : Union[str, Any] = TFEsmForTokenClassification(config=lowercase_ )
_lowerCamelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
_lowerCamelCase : List[str] = model(lowercase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
def _lowercase ( self: List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase_ )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@slow
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[Any] = TFEsmModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@unittest.skip("Protein models do not support embedding resizing." )
def _lowercase ( self: int ):
'''simple docstring'''
pass
@unittest.skip("Protein models do not support embedding resizing." )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = model_class(lowercase_ )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
_lowerCamelCase : Dict = model.get_bias()
assert isinstance(lowercase_ ,lowercase_ )
for k, v in name.items():
assert isinstance(lowercase_ ,tf.Variable )
else:
_lowerCamelCase : Any = model.get_output_embeddings()
assert x is None
_lowerCamelCase : Optional[int] = model.get_bias()
assert name is None
@require_tf
class A_ ( unittest.TestCase ):
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
_lowerCamelCase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowerCamelCase : List[Any] = model(lowercase_ )[0]
_lowerCamelCase : Tuple = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) ,lowercase_ )
# compare the actual values for a slice.
_lowerCamelCase : Optional[int] = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) )
@slow
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
_lowerCamelCase : Any = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_lowerCamelCase : List[Any] = model(lowercase_ )[0]
# compare the actual values for a slice.
_lowerCamelCase : List[Any] = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) ) | 371 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
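# Usage sketch (added, illustrative): `CamembertConfig()` with the defaults
# above reproduces the architecture of the "camembert-base" checkpoint.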
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] ) | 340 | 0 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_lowerCAmelCase : Any = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint) | 350 |
"""simple docstring"""
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording vertices
    whose subtree size is even in the global `cuts` list."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Kick off the traversal from the root (vertex 1)."""
    dfs(1)
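# Worked example (added): for the 10-node sample tree below, the subtrees
# rooted at vertices 3 and 6 have even sizes (2 and 4), so cutting their edges
# to vertex 1 leaves components of sizes 2, 4 and 4. The program prints
# len(cuts) - 1 == 2 (the root is always appended to `cuts`, hence the -1).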
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1) | 340 | 0 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_lowerCAmelCase : int = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class A_ ( unittest.TestCase ):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[str, List[str], None] = None,
        ignore_files: Union[List[str], None] = None,
        only_modules: bool = True,
    ):
        """Doctest the files in `directory` matching `identifier`, skipping `n_identifier` and `ignore_files`."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : int = Path("src/transformers" )
_lowerCamelCase : str = "modeling"
_lowerCamelCase : Optional[Any] = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(lowercase_ ,identifier=lowercase_ ,ignore_files=lowercase_ )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[Any] = Path("src/transformers" )
_lowerCamelCase : Any = "tokenization"
self.analyze_directory(lowercase_ ,identifier=lowercase_ )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = Path("src/transformers" )
_lowerCamelCase : List[Any] = "configuration"
self.analyze_directory(lowercase_ ,identifier=lowercase_ )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = Path("src/transformers" )
_lowerCamelCase : List[Any] = ["configuration", "modeling", "tokenization"]
self.analyze_directory(lowercase_ ,n_identifier=lowercase_ )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = Path("docs/source" )
_lowerCamelCase : Union[str, Any] = ["favicon.ico"]
self.analyze_directory(lowercase_ ,ignore_files=lowercase_ ,only_modules=lowercase_ ) | 351 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    # Fixture and parameter names in this file are restored on a best-effort
    # basis from the upstream `datasets` hub test fixtures.
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="session" )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
return HfApi(endpoint=_lowerCamelCase )
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Dict = HfFolder.get_token()
HfFolder.save_token(_lowerCamelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_lowerCamelCase )
@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : Tuple = F"""repo_txt_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : List[str] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
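# Aside (illustrative): the repo fixtures avoid name collisions across CI runs
# by embedding a timestamp; 10e3 is 10_000.0, so the suffix has ~0.1 ms resolution.
import time

def unique_repo_name(prefix):
    return f"{prefix}-{int(time.time() * 10e3)}"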
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : Dict = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Any = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : List[Any] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_ | 340 | 0 |
"""simple docstring"""
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    '''simple docstring'''
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    '''simple docstring'''
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    '''simple docstring'''
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
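# Quick check (illustrative): the three implementations agree, and the stack
# variant is O(n) since each element is pushed once and popped at most once.
_sample = [2, 7, 3, 5, 1]
assert (
    next_greatest_element(_sample)
    == next_greatest_element_slow(_sample)
    == next_greatest_element_fast(_sample)
    == [7, -1, 5, -1, -1]
)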
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
) | 352 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class A_ ( _a ):
def __init__( self: List[Any] ,__lowerCAmelCase: Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
'''simple docstring'''
super().__init__()
_lowerCamelCase : Tuple = nn.ModuleList(__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Union[torch.Tensor, float, int] ,__lowerCAmelCase: torch.Tensor ,__lowerCAmelCase: List[torch.tensor] ,__lowerCAmelCase: List[float] ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[Dict[str, Any]] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(__lowerCAmelCase ,__lowerCAmelCase ,self.nets ) ):
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = controlnet(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,)
# merge samples
if i == 0:
_lowerCamelCase, _lowerCamelCase : Optional[Any] = down_samples, mid_sample
else:
_lowerCamelCase : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(__lowerCAmelCase ,__lowerCAmelCase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Union[str, os.PathLike] ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Callable = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[str] = None ,):
'''simple docstring'''
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : str = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
__lowerCAmelCase ,is_main_process=__lowerCAmelCase ,save_function=__lowerCAmelCase ,safe_serialization=__lowerCAmelCase ,variant=__lowerCAmelCase ,)
idx += 1
_lowerCamelCase : int = model_path_to_save + F"""_{idx}"""
@classmethod
def _lowercase ( cls: Any ,__lowerCAmelCase: Optional[Union[str, os.PathLike]] ,**__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : int = 0
_lowerCamelCase : str = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_lowerCamelCase : Dict = pretrained_model_path
while os.path.isdir(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = ControlNetModel.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase )
controlnets.append(__lowerCAmelCase )
idx += 1
_lowerCamelCase : Tuple = pretrained_model_path + F"""_{idx}"""
logger.info(F"""{len(__lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.""" )
if len(__lowerCAmelCase ) == 0:
raise ValueError(
F"""No ControlNets found under {os.path.dirname(__lowerCAmelCase )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(__lowerCAmelCase ) | 340 | 0 |
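# Aside (illustrative): the per-ControlNet merge in the forward pass above,
# isolated. Residual lists are summed elementwise across nets; a framework-free
# sketch with assumed names:
def merge_residuals(accumulated, new_samples):
    if accumulated is None:
        return list(new_samples)
    return [prev + curr for prev, curr in zip(accumulated, new_samples)]

assert merge_residuals([1, 2], [10, 20]) == [11, 22]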
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ = 'nat'
lowerCAmelCase__ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self: int ,__lowerCAmelCase: List[Any]=4 ,__lowerCAmelCase: Optional[Any]=3 ,__lowerCAmelCase: str=64 ,__lowerCAmelCase: Union[str, Any]=[3, 4, 6, 5] ,__lowerCAmelCase: str=[2, 4, 8, 16] ,__lowerCAmelCase: Union[str, Any]=7 ,__lowerCAmelCase: str=3.0 ,__lowerCAmelCase: str=True ,__lowerCAmelCase: List[str]=0.0 ,__lowerCAmelCase: List[Any]=0.0 ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: Any="gelu" ,__lowerCAmelCase: int=0.02 ,__lowerCAmelCase: List[Any]=1e-5 ,__lowerCAmelCase: Dict=0.0 ,__lowerCAmelCase: Dict=None ,__lowerCAmelCase: Dict=None ,**__lowerCAmelCase: Any ,):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCamelCase : Union[str, Any] = patch_size
_lowerCamelCase : List[str] = num_channels
_lowerCamelCase : Tuple = embed_dim
_lowerCamelCase : Any = depths
_lowerCamelCase : Dict = len(snake_case__ )
_lowerCamelCase : str = num_heads
_lowerCamelCase : Dict = kernel_size
_lowerCamelCase : Union[str, Any] = mlp_ratio
_lowerCamelCase : int = qkv_bias
_lowerCamelCase : Optional[Any] = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : List[str] = drop_path_rate
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : Dict = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCamelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
_lowerCamelCase : Any = layer_scale_init_value
_lowerCamelCase : Any = ['stem'] + [F"""stage{idx}""" for idx in range(1 ,len(snake_case__ ) + 1 )]
_lowerCamelCase : str = get_aligned_output_features_output_indices(
out_features=snake_case__ ,out_indices=snake_case__ ,stage_names=self.stage_names ) | 353 |
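# Aside (illustrative): the hidden_size computed above follows the standard
# hierarchical-backbone rule -- the channel count doubles at every stage:
def final_hidden_size(embed_dim, num_stages):
    return int(embed_dim * 2 ** (num_stages - 1))

assert final_hidden_size(64, 4) == 512  # matches the defaults in this config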
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : int = logging.get_logger(__name__)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Tuple = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head" ):
_lowerCamelCase : Tuple = "segformer.encoder." + key
if key.startswith("backbone" ):
_lowerCamelCase : Any = key.replace("backbone" , "segformer.encoder" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_lowerCamelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )]
_lowerCamelCase : int = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_lowerCamelCase )-1}""" )
if "norm" in key:
_lowerCamelCase : Optional[Any] = key.replace("norm" , "layer_norm" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_lowerCamelCase : Dict = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
_lowerCamelCase : Tuple = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_lowerCamelCase )-1}""" )
if "layer_norm1" in key:
_lowerCamelCase : Union[str, Any] = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
_lowerCamelCase : int = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
_lowerCamelCase : Union[str, Any] = key[key.find("block" ) + len("block" )]
_lowerCamelCase : Optional[Any] = key.replace(F"""block{idx}""" , F"""block.{int(_lowerCamelCase )-1}""" )
if "attn.q" in key:
_lowerCamelCase : Optional[int] = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
_lowerCamelCase : List[str] = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
_lowerCamelCase : Tuple = key.replace("attn" , "attention.self" )
if "fc1" in key:
_lowerCamelCase : Optional[Any] = key.replace("fc1" , "dense1" )
if "fc2" in key:
_lowerCamelCase : Dict = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
_lowerCamelCase : int = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
_lowerCamelCase : str = key.replace("linear_fuse.conv" , "linear_fuse" )
_lowerCamelCase : Optional[Any] = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_lowerCamelCase : Union[str, Any] = key[key.find("linear_c" ) + len("linear_c" )]
_lowerCamelCase : Optional[int] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_lowerCamelCase )-1}""" )
if key.startswith("head" ):
_lowerCamelCase : List[str] = key.replace("head" , "classifier" )
_lowerCamelCase : Union[str, Any] = value
return new_state_dict
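# Aside (illustrative): the "block1" -> "block.0" style renames above, done once
# with a regex instead of find/replace surgery; the helper name is an assumption.
import re

def shift_index(key, prefix):
    return re.sub(rf"{prefix}(\d+)", lambda m: f"{prefix}.{int(m.group(1)) - 1}", key)

assert shift_index("block1.attn.q", "block") == "block.0.attn.q"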
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
_lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
_lowerCamelCase : int = kv_weight[
: config.hidden_sizes[i], :
]
_lowerCamelCase : int = kv_bias[: config.hidden_sizes[i]]
_lowerCamelCase : Optional[int] = kv_weight[
config.hidden_sizes[i] :, :
]
_lowerCamelCase : Optional[Any] = kv_bias[
config.hidden_sizes[i] :
]
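# Aside (illustrative): the split above in miniature -- a fused (2*h, h) "kv"
# projection matrix is cut into separate key and value weights of shape (h, h).
import torch

_h = 4
_kv = torch.randn(2 * _h, _h)
_k, _v = _kv[:_h, :], _kv[_h:, :]
assert _k.shape == _v.shape == (_h, _h)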
def lowerCamelCase_( ) -> Dict:
'''simple docstring'''
_lowerCamelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : Any = SegformerConfig()
_lowerCamelCase : int = False
# set attributes based on model_name
_lowerCamelCase : Any = "huggingface/label-files"
if "segformer" in model_name:
_lowerCamelCase : str = model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
_lowerCamelCase : str = 150
_lowerCamelCase : Dict = "ade20k-id2label.json"
_lowerCamelCase : Dict = (1, 150, 128, 128)
elif "city" in model_name:
_lowerCamelCase : List[str] = 19
_lowerCamelCase : Tuple = "cityscapes-id2label.json"
_lowerCamelCase : Tuple = (1, 19, 128, 128)
else:
raise ValueError(F"""Model {model_name} not supported""" )
elif "mit" in model_name:
_lowerCamelCase : List[str] = True
_lowerCamelCase : Tuple = model_name[4:6]
_lowerCamelCase : Tuple = 1000
_lowerCamelCase : List[Any] = "imagenet-1k-id2label.json"
_lowerCamelCase : List[Any] = (1, 1000)
else:
raise ValueError(F"""Model {model_name} not supported""" )
# set config attributes
_lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = idalabel
_lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
_lowerCamelCase : int = [64, 128, 320, 512]
_lowerCamelCase : int = 256
elif size == "b2":
_lowerCamelCase : Tuple = [64, 128, 320, 512]
_lowerCamelCase : List[Any] = 768
_lowerCamelCase : Any = [3, 4, 6, 3]
elif size == "b3":
_lowerCamelCase : Tuple = [64, 128, 320, 512]
_lowerCamelCase : Union[str, Any] = 768
_lowerCamelCase : Optional[Any] = [3, 4, 18, 3]
elif size == "b4":
_lowerCamelCase : str = [64, 128, 320, 512]
_lowerCamelCase : Optional[Any] = 768
_lowerCamelCase : Dict = [3, 8, 27, 3]
elif size == "b5":
_lowerCamelCase : int = [64, 128, 320, 512]
_lowerCamelCase : Tuple = 768
_lowerCamelCase : Tuple = [3, 6, 40, 3]
else:
raise ValueError(F"""Size {size} not supported""" )
# load image processor (only resize + normalize)
_lowerCamelCase : Dict = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase )
# prepare image
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values
logger.info(F"""Converting model {model_name}...""" )
# load original state dict
if encoder_only:
_lowerCamelCase : Tuple = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )
else:
_lowerCamelCase : int = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
_lowerCamelCase : str = rename_keys(_lowerCamelCase , encoder_only=_lowerCamelCase )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(_lowerCamelCase , _lowerCamelCase )
# create HuggingFace model and load state dict
if encoder_only:
_lowerCamelCase : Tuple = False
_lowerCamelCase : Optional[int] = SegformerForImageClassification(_lowerCamelCase )
else:
_lowerCamelCase : List[str] = SegformerForSemanticSegmentation(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
# forward pass
_lowerCamelCase : Any = model(_lowerCamelCase )
_lowerCamelCase : Dict = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
_lowerCamelCase : str = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
_lowerCamelCase : Any = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
_lowerCamelCase : int = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
_lowerCamelCase : Optional[Any] = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
_lowerCamelCase : List[str] = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
_lowerCamelCase : Any = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
_lowerCamelCase : Dict = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
_lowerCamelCase : Optional[int] = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
_lowerCamelCase : Tuple = torch.tensor(
[
[
[-1.13_72e01, -1.27_87e01, -1.34_77e01],
[-1.25_36e01, -1.41_94e01, -1.44_09e01],
[-1.32_17e01, -1.48_88e01, -1.53_27e01],
],
[
[-1.47_91e01, -1.71_22e01, -1.82_77e01],
[-1.71_63e01, -1.91_92e01, -1.95_33e01],
[-1.78_97e01, -1.99_91e01, -2.03_15e01],
],
[
[7.67_23e-01, 4.19_21e-01, -7.78_78e-02],
[4.77_72e-01, 9.55_57e-03, -2.80_82e-01],
[3.60_32e-01, -2.48_26e-01, -5.11_68e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
_lowerCamelCase : Union[str, Any] = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
_lowerCamelCase : List[Any] = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
_lowerCamelCase : Tuple = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
_lowerCamelCase : Any = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
_lowerCamelCase : List[str] = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
_lowerCamelCase : str = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
else:
_lowerCamelCase : Dict = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-2 )
# finally, save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_lowerCAmelCase : str = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 340 | 0 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class A_ ( _a ):
def __init__( self: Union[str, Any] ,__lowerCAmelCase: Union[str, "sqlalchemy.sql.Selectable"] ,__lowerCAmelCase: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,**__lowerCAmelCase: str ,):
'''simple docstring'''
super().__init__(features=__UpperCAmelCase ,cache_dir=__UpperCAmelCase ,keep_in_memory=__UpperCAmelCase ,**__UpperCAmelCase )
_lowerCamelCase : Any = Sql(
cache_dir=__UpperCAmelCase ,features=__UpperCAmelCase ,sql=__UpperCAmelCase ,con=__UpperCAmelCase ,**__UpperCAmelCase ,)
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Dict = None
_lowerCamelCase : Dict = None
_lowerCamelCase : Union[str, Any] = None
_lowerCamelCase : str = None
self.builder.download_and_prepare(
download_config=__UpperCAmelCase ,download_mode=__UpperCAmelCase ,verification_mode=__UpperCAmelCase ,base_path=__UpperCAmelCase ,)
# Build dataset for splits
_lowerCamelCase : str = self.builder.as_dataset(
split="train" ,verification_mode=__UpperCAmelCase ,in_memory=self.keep_in_memory )
return dataset
class A_ :
def __init__( self: Union[str, Any] ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: Any ,):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
_lowerCamelCase : List[str] = dataset
_lowerCamelCase : Any = name
_lowerCamelCase : Optional[int] = con
_lowerCamelCase : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_lowerCamelCase : int = num_proc
_lowerCamelCase : int = to_sql_kwargs
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.to_sql_kwargs.pop("sql" ,__UpperCAmelCase )
_lowerCamelCase : Dict = self.to_sql_kwargs.pop("con" ,__UpperCAmelCase )
_lowerCamelCase : Optional[Any] = self.to_sql_kwargs.pop("index" ,__UpperCAmelCase )
_lowerCamelCase : Optional[Any] = self._write(index=__UpperCAmelCase ,**self.to_sql_kwargs )
return written
def _lowercase ( self: List[str] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = args
_lowerCamelCase : Any = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
_lowerCamelCase : Optional[Any] = query_table(
table=self.dataset.data ,key=slice(__UpperCAmelCase ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
_lowerCamelCase : Tuple = batch.to_pandas()
_lowerCamelCase : Dict = df.to_sql(self.name ,self.con ,index=__UpperCAmelCase ,**__UpperCAmelCase )
return num_rows or len(__UpperCAmelCase )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: List[Any] ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating SQL from Arrow format" ,):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
_lowerCamelCase, _lowerCamelCase : Any = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,__UpperCAmelCase ,__UpperCAmelCase )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating SQL from Arrow format" ,):
written += num_rows
return written | 354 |
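# Aside (illustrative): the offset-based batching used by _write above, reduced
# to its core -- iterate row offsets in fixed-size steps:
def iter_offsets(num_rows, batch_size):
    return list(range(0, num_rows, batch_size))

assert iter_offsets(10, 4) == [0, 4, 8]  # batches [0:4], [4:8], [8:10]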
"""simple docstring"""
cache: dict[tuple[int, int, int], int] = {}
def _calculate(days: int, absent: int, late: int) -> int:
    '''simple docstring'''
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution(days: int = 30) -> int:
    '''simple docstring'''
    return _calculate(days, absent=0, late=0)
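# Quick check (illustrative): Project Euler 191 states that a 4-day period
# admits 43 prize strings, which the memoised recurrence reproduces.
assert solution(4) == 43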
if __name__ == "__main__":
print(solution()) | 340 | 0 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCAmelCase : Union[str, Any] = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class A_ ( unittest.TestCase ):
@classmethod
def _lowercase ( cls: str ):
'''simple docstring'''
_lowerCamelCase : List[Any] = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE_ )
@classmethod
def _lowercase ( cls: Union[str, Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="test-dynamic-config" )
except HTTPError:
pass
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Any = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub("test-config" ,use_auth_token=self._token )
_lowerCamelCase : str = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ ,getattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
# Reset repo
delete_repo(token=self._token ,repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE_ ,repo_id="test-config" ,push_to_hub=SCREAMING_SNAKE_CASE_ ,use_auth_token=self._token )
_lowerCamelCase : Optional[int] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ ,getattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : int = BertConfig(
vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" ,use_auth_token=self._token )
_lowerCamelCase : Optional[int] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ ,getattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
# Reset repo
delete_repo(token=self._token ,repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
SCREAMING_SNAKE_CASE_ ,repo_id="valid_org/test-config-org" ,push_to_hub=SCREAMING_SNAKE_CASE_ ,use_auth_token=self._token )
_lowerCamelCase : Union[str, Any] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ ,getattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( self: Tuple ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
_lowerCamelCase : Any = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map ,{"AutoConfig": "custom_configuration.CustomConfig"} )
_lowerCamelCase : Dict = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" ,trust_remote_code=SCREAMING_SNAKE_CASE_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ ,"CustomConfig" )
self.assertEqual(new_config.attribute ,42 )
class A_ ( unittest.TestCase ):
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_lowerCamelCase : Dict = c.n_embd + 1 # int
_lowerCamelCase : Dict = c.resid_pdrop + 1.0 # float
_lowerCamelCase : Union[str, Any] = not c.scale_attn_weights # bool
_lowerCamelCase : Tuple = c.summary_type + """foo""" # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(SCREAMING_SNAKE_CASE_ ,c.n_embd ,"mismatch for key: n_embd" )
self.assertEqual(SCREAMING_SNAKE_CASE_ ,c.resid_pdrop ,"mismatch for key: resid_pdrop" )
self.assertEqual(SCREAMING_SNAKE_CASE_ ,c.scale_attn_weights ,"mismatch for key: scale_attn_weights" )
self.assertEqual(SCREAMING_SNAKE_CASE_ ,c.summary_type ,"mismatch for key: summary_type" )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = PretrainedConfig()
_lowerCamelCase : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
SCREAMING_SNAKE_CASE_ ,["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
_lowerCamelCase : Tuple = [key for key, value in config_common_kwargs.items() if value == getattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )]
if len(SCREAMING_SNAKE_CASE_ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F""" {', '.join(SCREAMING_SNAKE_CASE_ )}.""" )
def _lowercase ( self: int ):
'''simple docstring'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCamelCase : List[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
_lowerCamelCase : Dict = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" ,subfolder="bert" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : str = mock.Mock()
_lowerCamelCase : Union[str, Any] = 500
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : List[Any] = HTTPError
_lowerCamelCase : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
_lowerCamelCase : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" ,return_value=SCREAMING_SNAKE_CASE_ ) as mock_head:
_lowerCamelCase : Tuple = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : int = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained("bert-base-cased" )
_lowerCamelCase : Optional[Any] = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Any = 2
json.dump(configuration.to_dict() ,open(os.path.join(SCREAMING_SNAKE_CASE_ ,"config.4.0.0.json" ) ,"w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertEqual(new_configuration.hidden_size ,2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCamelCase : List[Any] = ["""config.42.0.0.json"""]
_lowerCamelCase : Optional[int] = 768
configuration.save_pretrained(SCREAMING_SNAKE_CASE_ )
shutil.move(os.path.join(SCREAMING_SNAKE_CASE_ ,"config.4.0.0.json" ) ,os.path.join(SCREAMING_SNAKE_CASE_ ,"config.42.0.0.json" ) )
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertEqual(new_configuration.hidden_size ,768 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
_lowerCamelCase : Optional[Any] = """v4.0.0"""
_lowerCamelCase : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
SCREAMING_SNAKE_CASE_ ,return_unused_kwargs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(new_configuration.hidden_size ,2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(SCREAMING_SNAKE_CASE_ ,{} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_lowerCamelCase : Dict = """v3.0.0"""
_lowerCamelCase : int = old_transformers.models.auto.AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertEqual(old_configuration.hidden_size ,768 ) | 355 |
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    '''simple docstring'''
    base_str = str(n)
    return len(base_str) == 9 and set(base_str) == set("123456789")
def solution() -> int | None:
    '''simple docstring'''
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
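# Aside (illustrative): the two scan ranges follow from the concatenation
# patterns -- a 4-digit base b gives str(b) + str(2*b), i.e. b * 100002, and a
# 3-digit base gives str(b) + str(2*b) + str(3*b), i.e. b * 1002003.
assert is_9_pandigital(192 * 1002003)  # 192 -> "192384576", the classic example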
if __name__ == "__main__":
print(f'''{solution() = }''') | 340 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A_ ( __UpperCamelCase ):
@staticmethod
@abstractmethod
def _lowercase ( __lowerCAmelCase: ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def _lowercase ( self: str ):
'''simple docstring'''
raise NotImplementedError() | 356 |
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class A_ ( _a ):
lowerCAmelCase__ = 'char'
lowerCAmelCase__ = 'bpe'
lowerCAmelCase__ = 'wp'
_lowerCAmelCase : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class A_ ( _a ):
lowerCAmelCase__ = ['image_processor', 'char_tokenizer']
lowerCAmelCase__ = 'ViTImageProcessor'
lowerCAmelCase__ = 'MgpstrTokenizer'
def __init__( self: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Optional[int]=None ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." ,__lowerCAmelCase ,)
_lowerCamelCase : List[Any] = kwargs.pop("feature_extractor" )
_lowerCamelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
_lowerCamelCase : List[str] = tokenizer
_lowerCamelCase : str = AutoTokenizer.from_pretrained("gpt2" )
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__lowerCAmelCase ,__lowerCAmelCase )
def __call__( self: Optional[int] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[Any]=None ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
_lowerCamelCase : Optional[int] = self.image_processor(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
if text is not None:
_lowerCamelCase : int = self.char_tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_lowerCamelCase : Tuple = encodings["input_ids"]
return inputs
def _lowercase ( self: int ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = sequences
_lowerCamelCase : Dict = char_preds.size(0 )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self._decode_helper(__lowerCAmelCase ,"char" )
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self._decode_helper(__lowerCAmelCase ,"bpe" )
_lowerCamelCase, _lowerCamelCase : Tuple = self._decode_helper(__lowerCAmelCase ,"wp" )
_lowerCamelCase : List[str] = []
_lowerCamelCase : str = []
for i in range(__lowerCAmelCase ):
_lowerCamelCase : str = [char_scores[i], bpe_scores[i], wp_scores[i]]
_lowerCamelCase : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]]
_lowerCamelCase : Optional[Any] = scores.index(max(__lowerCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = final_strs
_lowerCamelCase : int = final_scores
_lowerCamelCase : str = char_strs
_lowerCamelCase : Dict = bpe_strs
_lowerCamelCase : int = wp_strs
return out
def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
_lowerCamelCase : int = self.char_decode
_lowerCamelCase : List[str] = 1
_lowerCamelCase : Optional[int] = "[s]"
elif format == DecodeType.BPE:
_lowerCamelCase : Dict = self.bpe_decode
_lowerCamelCase : str = 2
_lowerCamelCase : Union[str, Any] = "#"
elif format == DecodeType.WORDPIECE:
_lowerCamelCase : int = self.wp_decode
_lowerCamelCase : List[str] = 102
_lowerCamelCase : List[Any] = "[SEP]"
else:
raise ValueError(F"""Format {format} is not supported.""" )
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = [], []
_lowerCamelCase : Any = pred_logits.size(0 )
_lowerCamelCase : int = pred_logits.size(1 )
_lowerCamelCase, _lowerCamelCase : List[Any] = pred_logits.topk(1 ,dim=-1 ,largest=__lowerCAmelCase ,sorted=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = preds_index.view(-1 ,__lowerCAmelCase )[:, 1:]
_lowerCamelCase : List[str] = decoder(__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : str = torch.nn.functional.softmax(__lowerCAmelCase ,dim=2 ).max(dim=2 )
_lowerCamelCase : Any = preds_max_prob[:, 1:]
for index in range(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = preds_str[index].find(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = preds_str[index][:pred_eos]
_lowerCamelCase : Optional[Any] = preds_index[index].cpu().tolist()
_lowerCamelCase : List[str] = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1
_lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1]
_lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__lowerCAmelCase )
conf_scores.append(__lowerCAmelCase )
return dec_strs, conf_scores
def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : str = [seq.replace(" " ,"" ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )]
return decode_strs
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__lowerCAmelCase )
def _lowercase ( self: Tuple ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = [seq.replace(" " ,"" ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )]
return decode_strs | 340 | 0 |
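# Aside (illustrative): the head selection in batch_decode above, isolated --
# per sample, keep the decode from whichever of the three heads scored highest:
def pick_best(candidate_strs, candidate_scores):
    best = candidate_scores.index(max(candidate_scores))
    return candidate_strs[best], candidate_scores[best]

assert pick_best(["char", "bpe", "wp"], [0.7, 0.9, 0.8]) == ("bpe", 0.9)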
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class A_ :
def __init__( self: Optional[int] ,__lowerCAmelCase: Optional[int]=None ,**__lowerCAmelCase: List[Any] ):
'''simple docstring'''
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
_lowerCamelCase : List[Any] = model
_lowerCamelCase : Optional[Any] = kwargs.get("model_save_dir" ,UpperCamelCase_ )
_lowerCamelCase : List[Any] = kwargs.get("latest_model_name" ,UpperCamelCase_ )
def __call__( self: str ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = {k: np.array(UpperCamelCase_ ) for k, v in kwargs.items()}
return self.model.run(UpperCamelCase_ ,UpperCamelCase_ )
@staticmethod
def _lowercase ( __lowerCAmelCase: Union[str, Path] ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: int=None ):
'''simple docstring'''
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
_lowerCamelCase : Optional[int] = "CPUExecutionProvider"
return ort.InferenceSession(UpperCamelCase_ ,providers=[provider] ,sess_options=UpperCamelCase_ )
def _lowercase ( self: Tuple ,__lowerCAmelCase: Union[str, Path] ,__lowerCAmelCase: Optional[str] = None ,**__lowerCAmelCase: Any ):
'''simple docstring'''
_lowerCamelCase : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
_lowerCamelCase : Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name )
_lowerCamelCase : Dict = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ )
try:
shutil.copyfile(UpperCamelCase_ ,UpperCamelCase_ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
_lowerCamelCase : Optional[Any] = self.model_save_dir.joinpath(UpperCamelCase_ )
if src_path.exists():
_lowerCamelCase : Dict = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ )
try:
shutil.copyfile(UpperCamelCase_ ,UpperCamelCase_ )
except shutil.SameFileError:
pass
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Union[str, os.PathLike] ,**__lowerCAmelCase: Union[str, Any] ,):
'''simple docstring'''
if os.path.isfile(UpperCamelCase_ ):
logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(UpperCamelCase_ ,exist_ok=UpperCamelCase_ )
# saving model weights/files
self._save_pretrained(UpperCamelCase_ ,**UpperCamelCase_ )
@classmethod
def _lowercase ( cls: Optional[int] ,__lowerCAmelCase: Union[str, Path] ,__lowerCAmelCase: Optional[Union[bool, str, None]] = None ,__lowerCAmelCase: Optional[Union[str, None]] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: Optional["ort.SessionOptions"] = None ,**__lowerCAmelCase: List[str] ,):
'''simple docstring'''
_lowerCamelCase : str = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(UpperCamelCase_ ):
_lowerCamelCase : Optional[int] = OnnxRuntimeModel.load_model(
os.path.join(UpperCamelCase_ ,UpperCamelCase_ ) ,provider=UpperCamelCase_ ,sess_options=UpperCamelCase_ )
_lowerCamelCase : str = Path(UpperCamelCase_ )
# load model from hub
else:
# download model
_lowerCamelCase : Optional[Any] = hf_hub_download(
repo_id=UpperCamelCase_ ,filename=UpperCamelCase_ ,use_auth_token=UpperCamelCase_ ,revision=UpperCamelCase_ ,cache_dir=UpperCamelCase_ ,force_download=UpperCamelCase_ ,)
_lowerCamelCase : List[Any] = Path(UpperCamelCase_ ).parent
_lowerCamelCase : str = Path(UpperCamelCase_ ).name
_lowerCamelCase : Dict = OnnxRuntimeModel.load_model(UpperCamelCase_ ,provider=UpperCamelCase_ ,sess_options=UpperCamelCase_ )
return cls(model=UpperCamelCase_ ,**UpperCamelCase_ )
@classmethod
def _lowercase ( cls: Optional[int] ,__lowerCAmelCase: Union[str, Path] ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: Optional[str] = None ,**__lowerCAmelCase: Union[str, Any] ,):
'''simple docstring'''
_lowerCamelCase : Tuple = None
if len(str(UpperCamelCase_ ).split("@" ) ) == 2:
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = model_id.split("@" )
return cls._from_pretrained(
model_id=UpperCamelCase_ ,revision=UpperCamelCase_ ,cache_dir=UpperCamelCase_ ,force_download=UpperCamelCase_ ,use_auth_token=UpperCamelCase_ ,**UpperCamelCase_ ,) | 357 |
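# Aside (illustrative): from_pretrained above accepts a "model_id@revision"
# string; the split it performs, in isolation and with assumed names:
def split_model_id(model_id):
    if len(model_id.split("@")) == 2:
        name, revision = model_id.split("@")
        return name, revision
    return model_id, None

assert split_model_id("org/model@main") == ("org/model", "main")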
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_lowerCAmelCase : List[Any] = get_logger(__name__)
class A_ :
lowerCAmelCase__ = 'dummy_data'
lowerCAmelCase__ = 'datasets'
lowerCAmelCase__ = False
def __init__( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[Version, str] ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[List[Callable]] = None ,):
'''simple docstring'''
_lowerCamelCase : str = 0
_lowerCamelCase : List[str] = dataset_name
_lowerCamelCase : Optional[int] = cache_dir
_lowerCamelCase : Optional[int] = use_local_dummy_data
_lowerCamelCase : int = config
# download_callbacks take a single url as input
_lowerCamelCase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_lowerCamelCase : int = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_lowerCamelCase : Tuple = str(__lowerCAmelCase )
# to be downloaded
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Dict = None
@property
def _lowercase ( self: str ):
'''simple docstring'''
if self._dummy_file is None:
_lowerCamelCase : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def _lowercase ( self: str ):
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" ,self.config.name ,self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" ,self.version_name )
@property
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
return os.path.join(self.dummy_data_folder ,"dummy_data.zip" )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_lowerCamelCase : Optional[int] = cached_path(
__lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=__lowerCAmelCase ,force_extract=__lowerCAmelCase )
return os.path.join(__lowerCAmelCase ,self.dummy_file_name )
@property
def _lowercase ( self: Tuple ):
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file )
@property
def _lowercase ( self: List[str] ):
'''simple docstring'''
if self._bucket_url is None:
_lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) )
return self._bucket_url
@property
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ):
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_lowerCamelCase : Tuple = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_lowerCamelCase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,(list, tuple) ):
return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase )
else:
return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return self.download_and_extract(__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ):
'''simple docstring'''
return self.download_and_extract(__lowerCAmelCase )
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return path
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return {}
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : str = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
for single_url in single_urls:
download_callback(__lowerCAmelCase )
else:
_lowerCamelCase : Union[str, Any] = single_urls
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) for x in single_urls]
else:
_lowerCamelCase : Union[str, Any] = single_urls
_lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) )
_lowerCamelCase : List[Any] = value
# make sure that values are unique
if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_lowerCamelCase : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,__lowerCAmelCase ) ) for url in data_url )
_lowerCamelCase : Optional[Any] = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_lowerCamelCase : Tuple = [data_url[0]] * len(__lowerCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(__lowerCAmelCase )
return dummy_data_list
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(__lowerCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
pass
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
def _iter_archive_members(__lowerCAmelCase: Any ):
# this preserves the order of the members inside the ZIP archive
_lowerCamelCase : Tuple = Path(self.dummy_file ).parent
_lowerCamelCase : str = path.relative_to(__lowerCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_lowerCamelCase : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = Path(__lowerCAmelCase )
_lowerCamelCase : int = _iter_archive_members(__lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(__lowerCAmelCase ).as_posix(), file_path.open("rb" )
def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = [paths]
for path in paths:
if os.path.isfile(__lowerCAmelCase ):
if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__lowerCAmelCase ):
if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(__lowerCAmelCase ):
if filename.startswith((".", "__") ):
continue
                        yield os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
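# Hedged usage sketch: upstream this manager is datasets' MockDownloadManager
# (the name is an assumption here). In dataset tests it maps real URLs to files
# bundled in dummy_data.zip instead of downloading anything:
#
#   dl_manager = MockDownloadManager("squad", config=None, version="1.0.0")
#   paths = dl_manager.download_and_extract({"train": "https://example.com/train.json"})
#   # -> path(s) inside dummy/1.0.0/dummy_data.zip rather than a real download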
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_lowerCAmelCase : int = logging.get_logger(__name__)
class A_ ( Enum ):
lowerCAmelCase__ = 'linear'
lowerCAmelCase__ = 'cosine'
lowerCAmelCase__ = 'cosine_with_restarts'
lowerCAmelCase__ = 'polynomial'
lowerCAmelCase__ = 'constant'
lowerCAmelCase__ = 'constant_with_warmup'
lowerCAmelCase__ = 'piecewise_constant'
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = -1 ) -> Union[str, Any]:
'''simple docstring'''
return LambdaLR(_lowerCamelCase , lambda _lowerCamelCase : 1 , last_epoch=_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(_lowerCamelCase ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1.0 , _lowerCamelCase ) )
return 1.0
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = -1 ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : Optional[int] = step_rules.split("," )
for rule_str in rule_list[:-1]:
_lowerCamelCase, _lowerCamelCase : Any = rule_str.split(":" )
_lowerCamelCase : List[Any] = int(_lowerCamelCase )
_lowerCamelCase : Dict = float(_lowerCamelCase )
_lowerCamelCase : str = value
_lowerCamelCase : Optional[int] = float(rule_list[-1] )
def create_rules_function(_lowerCamelCase , _lowerCamelCase ):
def rule_func(_lowerCamelCase ) -> float:
_lowerCamelCase : List[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_lowerCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_lowerCamelCase : Any = create_rules_function(_lowerCamelCase , _lowerCamelCase )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=-1 ) -> Union[str, Any]:
'''simple docstring'''
def lr_lambda(_lowerCamelCase ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0.5 , _lowerCamelCase = -1 ) -> List[str]:
'''simple docstring'''
def lr_lambda(_lowerCamelCase ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
_lowerCamelCase : List[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowerCamelCase ) * 2.0 * progress )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(_lowerCamelCase ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
_lowerCamelCase : Tuple = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowerCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1e-7 , _lowerCamelCase=1.0 , _lowerCamelCase=-1 ) -> Union[str, Any]:
'''simple docstring'''
_lowerCamelCase : List[Any] = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(_lowerCamelCase ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
_lowerCamelCase : Optional[int] = lr_init - lr_end
_lowerCamelCase : Optional[int] = num_training_steps - num_warmup_steps
_lowerCamelCase : str = 1 - (current_step - num_warmup_steps) / decay_steps
_lowerCamelCase : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 1.0 , _lowerCamelCase = -1 , ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = SchedulerType(_lowerCamelCase )
_lowerCamelCase : int = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_lowerCamelCase , last_epoch=_lowerCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_lowerCamelCase , step_rules=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_lowerCamelCase , num_warmup_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , num_cycles=_lowerCamelCase , last_epoch=_lowerCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , power=_lowerCamelCase , last_epoch=_lowerCamelCase , )
return schedule_func(
        _lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
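# Hedged usage sketch of the factory above (get_scheduler in the upstream
# diffusers API), assuming de-obfuscated parameter names:
#
#   import torch
#   model = torch.nn.Linear(4, 4)
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#   scheduler = get_scheduler("linear", optimizer, num_warmup_steps=100, num_training_steps=1000)
#   for _ in range(1000):
#       optimizer.step()
#       scheduler.step()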
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("Undefined for non-integers" )
elif precision < 1:
raise ValueError("Undefined for non-natural numbers" )
_lowerCamelCase : int = precision
_lowerCamelCase : Dict = ceil(precision / 14 )
_lowerCamelCase : Optional[Any] = 426880 * Decimal(10005 ).sqrt()
_lowerCamelCase : int = 1
_lowerCamelCase : Optional[int] = 13591409
_lowerCamelCase : int = Decimal(_lowerCamelCase )
for k in range(1 , _lowerCamelCase ):
_lowerCamelCase : Union[str, Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowerCamelCase ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = 50
    print(f'''The first {n} digits of pi is: {pi(n)}''')
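# Sanity-check sketch (using the de-obfuscated name pi from the main guard
# above): the leading digits must agree with math.pi.
#
#   import math
#   assert pi(15).startswith(str(math.pi)[:10])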
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
_lowerCAmelCase : Dict = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
_lowerCAmelCase : Union[str, Any] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class A_ ( PretrainedConfig ):
lowerCAmelCase__ = 'whisper'
lowerCAmelCase__ = ['past_key_values']
lowerCAmelCase__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self: List[str] ,__lowerCAmelCase: int=51_865 ,__lowerCAmelCase: Dict=80 ,__lowerCAmelCase: int=6 ,__lowerCAmelCase: Tuple=4 ,__lowerCAmelCase: Dict=6 ,__lowerCAmelCase: str=4 ,__lowerCAmelCase: Optional[Any]=1_536 ,__lowerCAmelCase: Optional[Any]=1_536 ,__lowerCAmelCase: List[Any]=0.0 ,__lowerCAmelCase: str=0.0 ,__lowerCAmelCase: Union[str, Any]=50_257 ,__lowerCAmelCase: Any=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Any="gelu" ,__lowerCAmelCase: Union[str, Any]=256 ,__lowerCAmelCase: Dict=0.0 ,__lowerCAmelCase: Union[str, Any]=0.0 ,__lowerCAmelCase: List[Any]=0.0 ,__lowerCAmelCase: str=0.02 ,__lowerCAmelCase: int=False ,__lowerCAmelCase: str=1_500 ,__lowerCAmelCase: List[str]=448 ,__lowerCAmelCase: List[Any]=50_256 ,__lowerCAmelCase: Dict=50_256 ,__lowerCAmelCase: Tuple=50_256 ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Tuple=[220, 50_256] ,__lowerCAmelCase: List[str]=False ,__lowerCAmelCase: int=256 ,__lowerCAmelCase: int=False ,__lowerCAmelCase: List[str]=0.05 ,__lowerCAmelCase: Union[str, Any]=10 ,__lowerCAmelCase: List[Any]=2 ,__lowerCAmelCase: Dict=0.0 ,__lowerCAmelCase: Any=10 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: str=7 ,**__lowerCAmelCase: Union[str, Any] ,):
'''simple docstring'''
_lowerCamelCase : str = vocab_size
_lowerCamelCase : int = num_mel_bins
_lowerCamelCase : int = d_model
_lowerCamelCase : int = encoder_layers
_lowerCamelCase : Optional[Any] = encoder_attention_heads
_lowerCamelCase : int = decoder_layers
_lowerCamelCase : str = decoder_attention_heads
_lowerCamelCase : List[str] = decoder_ffn_dim
_lowerCamelCase : Optional[Any] = encoder_ffn_dim
_lowerCamelCase : List[Any] = dropout
_lowerCamelCase : Tuple = attention_dropout
_lowerCamelCase : Optional[int] = activation_dropout
_lowerCamelCase : str = activation_function
_lowerCamelCase : Optional[Any] = init_std
_lowerCamelCase : Any = encoder_layerdrop
_lowerCamelCase : str = decoder_layerdrop
_lowerCamelCase : List[str] = use_cache
_lowerCamelCase : Any = encoder_layers
_lowerCamelCase : str = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCamelCase : Dict = max_source_positions
_lowerCamelCase : Optional[int] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
_lowerCamelCase : Union[str, Any] = classifier_proj_size
_lowerCamelCase : Any = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Tuple = apply_spec_augment
_lowerCamelCase : List[str] = mask_time_prob
_lowerCamelCase : Dict = mask_time_length
_lowerCamelCase : Tuple = mask_time_min_masks
_lowerCamelCase : List[Any] = mask_feature_prob
_lowerCamelCase : Tuple = mask_feature_length
_lowerCamelCase : int = mask_feature_min_masks
_lowerCamelCase : Tuple = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,is_encoder_decoder=is_encoder_decoder ,decoder_start_token_id=decoder_start_token_id ,suppress_tokens=suppress_tokens ,begin_suppress_tokens=begin_suppress_tokens ,**kwargs ,)
class A_ ( OnnxSeqaSeqConfigWithPast ):
@property
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Dict = OrderedDict(
[
("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
] )
if self.use_past:
_lowerCamelCase : Dict = {0: "batch"}
else:
_lowerCamelCase : List[Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
            self.fill_with_past_key_values_(common_inputs ,direction="inputs" )
return common_inputs
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,__lowerCAmelCase: int = -1 ,__lowerCAmelCase: int = -1 ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional["TensorType"] = None ,__lowerCAmelCase: int = 22_050 ,__lowerCAmelCase: float = 5.0 ,__lowerCAmelCase: int = 220 ,):
'''simple docstring'''
_lowerCamelCase : Tuple = OrderedDict()
        _lowerCamelCase : str = OnnxConfig.generate_dummy_inputs(
            self ,preprocessor=preprocessor.feature_extractor ,batch_size=batch_size ,framework=framework ,sampling_rate=sampling_rate ,time_duration=time_duration ,frequency=frequency ,)
_lowerCamelCase : Dict = encoder_inputs["input_features"].shape[2]
_lowerCamelCase : int = encoder_sequence_length // 2 if self.use_past else seq_length
        _lowerCamelCase : List[Any] = super().generate_dummy_inputs(
            preprocessor.tokenizer ,batch_size ,seq_length ,is_pair ,framework )
_lowerCamelCase : Optional[Any] = encoder_inputs.pop("input_features" )
_lowerCamelCase : List[str] = decoder_inputs.pop("decoder_input_ids" )
if "past_key_values" in decoder_inputs:
_lowerCamelCase : List[Any] = decoder_inputs.pop("past_key_values" )
return dummy_inputs
@property
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
        return 1e-3
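# Hedged sketch: upstream these two classes are WhisperConfig and
# WhisperOnnxConfig in transformers; a typical round trip with the checkpoint
# referenced in the map above:
#
#   from transformers import WhisperConfig
#   config = WhisperConfig.from_pretrained("openai/whisper-base")
#   assert config.num_mel_bins == 80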
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class A_ ( BaseOutput ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = None
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=0.9_9_9 , _lowerCamelCase="cosine" , ) -> List[str]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_lowerCamelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_lowerCamelCase ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_lowerCamelCase : str = []
for i in range(_lowerCamelCase ):
_lowerCamelCase : Any = i / num_diffusion_timesteps
_lowerCamelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) )
return torch.tensor(_lowerCamelCase , dtype=torch.floataa )
class A_ ( SchedulerMixin , ConfigMixin ):
@register_to_config
def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,):
'''simple docstring'''
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
_lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = 1.0 - self.betas
_lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 )
_lowerCamelCase : int = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
_lowerCamelCase : Tuple = 1.0
# setable values
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() )
_lowerCamelCase : List[str] = variance_type
def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ):
'''simple docstring'''
return sample
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ):
'''simple docstring'''
_lowerCamelCase : str = num_inference_steps
_lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
_lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
_lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ):
'''simple docstring'''
if prev_timestep is None:
_lowerCamelCase : List[str] = t - 1
_lowerCamelCase : Optional[int] = self.alphas_cumprod[t]
_lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : Dict = 1 - alpha_prod_t
_lowerCamelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : List[Any] = self.betas[t]
else:
_lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
_lowerCamelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
_lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) )
_lowerCamelCase : str = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
_lowerCamelCase : str = variance.log()
_lowerCamelCase : str = beta.log()
_lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2
_lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : str = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
_lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 )
else:
_lowerCamelCase : List[Any] = None
# 1. compute alphas, betas
if prev_timestep is None:
_lowerCamelCase : List[Any] = t - 1
_lowerCamelCase : Dict = self.alphas_cumprod[t]
_lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : Dict = 1 - alpha_prod_t
_lowerCamelCase : List[str] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : Any = self.betas[t]
_lowerCamelCase : str = self.alphas[t]
else:
_lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev
_lowerCamelCase : Optional[Any] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_lowerCamelCase : List[Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_lowerCamelCase : Any = torch.clamp(
__lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
_lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_lowerCamelCase : Union[str, Any] = 0
if t > 0:
_lowerCamelCase : Dict = randn_tensor(
model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device )
_lowerCamelCase : Any = self._get_variance(
__lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,)
if self.variance_type == "fixed_small_log":
_lowerCamelCase : Optional[Any] = variance
elif self.variance_type == "learned_range":
_lowerCamelCase : Optional[int] = (0.5 * variance).exp()
else:
raise ValueError(
F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
" for the UnCLIPScheduler." )
_lowerCamelCase : Dict = variance * variance_noise
_lowerCamelCase : List[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase )
def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.IntTensor ,):
'''simple docstring'''
_lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype )
_lowerCamelCase : Any = timesteps.to(original_samples.device )
_lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5
_lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
_lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 )
_lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
_lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
_lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
_lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
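# Hedged denoising-loop sketch against the upstream UnCLIPScheduler API
# (set_timesteps/step correspond to the obfuscated methods above); the zero
# "noise prediction" stands in for a real network.
#
#   import torch
#   scheduler = UnCLIPScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       model_output = torch.zeros_like(sample)
#       sample = scheduler.step(model_output, t, sample).prev_sample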
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
_lowerCAmelCase : Union[str, Any] = HfArgumentParser(InitializationArguments)
_lowerCAmelCase : Dict = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
_lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
_lowerCAmelCase : Union[str, Any] = {
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
_lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
_lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
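# Hypothetical invocation (the flag names come from InitializationArguments
# and are assumptions here):
#
#   python initialize_model.py \
#       --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot \
#       --model_name codeparrot-model \
#       --push_to_hub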
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCAmelCase : str = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
for attribute in key.split("." ):
_lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
_lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
_lowerCamelCase : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
_lowerCamelCase : Tuple = value
elif weight_type == "weight_g":
_lowerCamelCase : List[str] = value
elif weight_type == "weight_v":
_lowerCamelCase : List[Any] = value
elif weight_type == "bias":
_lowerCamelCase : str = value
elif weight_type == "running_mean":
_lowerCamelCase : Optional[int] = value
elif weight_type == "running_var":
_lowerCamelCase : Optional[Any] = value
elif weight_type == "num_batches_tracked":
_lowerCamelCase : int = value
elif weight_type == "inv_freq":
_lowerCamelCase : List[str] = value
else:
_lowerCamelCase : Optional[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Dict = []
_lowerCamelCase : Optional[Any] = fairseq_model.state_dict()
_lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase : Dict = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
_lowerCamelCase : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_lowerCamelCase : int = True
if "*" in mapped_key:
_lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2]
_lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase )
if "pos_bias_u" in name:
_lowerCamelCase : int = None
elif "pos_bias_v" in name:
_lowerCamelCase : Any = None
elif "weight_g" in name:
_lowerCamelCase : Any = "weight_g"
elif "weight_v" in name:
_lowerCamelCase : Any = "weight_v"
elif "bias" in name:
_lowerCamelCase : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase : Dict = "weight"
elif "running_mean" in name:
_lowerCamelCase : str = "running_mean"
elif "inv_freq" in name:
_lowerCamelCase : List[Any] = "inv_freq"
elif "running_var" in name:
_lowerCamelCase : Tuple = "running_var"
elif "num_batches_tracked" in name:
_lowerCamelCase : str = "num_batches_tracked"
else:
_lowerCamelCase : Dict = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : int = full_name.split("conv_layers." )[-1]
_lowerCamelCase : List[Any] = name.split("." )
_lowerCamelCase : Union[str, Any] = int(items[0] )
_lowerCamelCase : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
_lowerCamelCase : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
_lowerCamelCase : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
_lowerCamelCase : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
_lowerCamelCase : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" )
else:
_lowerCamelCase : Dict = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
_lowerCamelCase : List[Any] = "rotary"
if is_finetuned:
if dict_path:
_lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase : Optional[int] = target_dict.pad_index
_lowerCamelCase : Dict = target_dict.bos_index
_lowerCamelCase : Optional[Any] = target_dict.eos_index
_lowerCamelCase : str = len(target_dict.symbols )
_lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
_lowerCamelCase : Tuple = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase : List[str] = 0
_lowerCamelCase : List[Any] = 1
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , )
_lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False
_lowerCamelCase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
_lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
_lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase )
else:
_lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase )
if is_finetuned:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" )
_lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
_lowerCamelCase : Dict = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_lowerCAmelCase : str = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
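# Hypothetical CLI usage of the converter above (the script filename is an
# assumption):
#
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path ./wav2vec2_conformer.pt \
#       --pytorch_dump_folder_path ./hf_model \
#       --not_finetuned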
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected" , [
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
({"num_shards": 10, "max_num_jobs": 10}, [range(a__ , i + 1 ) for i in range(10 )]),
({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
    _lowerCamelCase : Tuple = _distribute_shards(**kwargs )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
    _lowerCamelCase : int = _split_gen_kwargs(gen_kwargs , max_num_jobs )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        _lowerCamelCase : Union[str, Any] = _number_of_shards_in_gen_kwargs(gen_kwargs )
assert out == expected
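# Concrete behavior sketch: _split_gen_kwargs shards list-valued kwargs and
# replicates scalar kwargs across jobs, e.g. (expected output, not verified here):
#
#   _split_gen_kwargs({"shards": [0, 1, 2, 3], "foo": 0}, max_num_jobs=2)
#   # -> [{"shards": [0, 1], "foo": 0}, {"shards": [2, 3], "foo": 0}]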
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float:
'''simple docstring'''
_validate_point(_lowerCamelCase )
_validate_point(_lowerCamelCase )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(_lowerCamelCase , _lowerCamelCase ) ) )
def lowerCamelCase_( _lowerCamelCase ) -> None:
'''simple docstring'''
if point:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
for item in point:
if not isinstance(_lowerCamelCase , (int, float) ):
_lowerCamelCase : Dict = (
"Expected a list of numbers as input, found "
F"""{type(_lowerCamelCase ).__name__}"""
)
raise TypeError(_lowerCamelCase )
else:
_lowerCamelCase : Optional[int] = F"""Expected a list of numbers as input, found {type(_lowerCamelCase ).__name__}"""
raise TypeError(_lowerCamelCase )
else:
raise ValueError("Missing an input" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float:
'''simple docstring'''
_validate_point(_lowerCamelCase )
_validate_point(_lowerCamelCase )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(_lowerCamelCase , _lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
    doctest.testmod()
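# Worked examples (assuming de-obfuscated names manhattan_distance and
# manhattan_distance_one_liner for the two distance functions above):
#
#   manhattan_distance([1, 1], [2, 2])                # |1-2| + |1-2| = 2.0
#   manhattan_distance_one_liner([1.5, -1], [2, -1])  # 0.5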
"""simple docstring"""
import qiskit
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> qiskit.result.counts.Counts:
'''simple docstring'''
_lowerCamelCase : Tuple = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
    _lowerCamelCase : List[str] = qiskit.QuantumCircuit(qubits , classical_bits )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
    _lowerCamelCase : Optional[int] = qiskit.execute(circuit , simulator , shots=1000 )
# Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
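# Expected behavior note: no gates are applied before measurement, so the
# qubit stays in |0> and the counts should come back as {'0': 1000}.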
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = np.inf
def set_batch_size(_lowerCamelCase ) -> None:
nonlocal batch_size
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Optional[int] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(_lowerCamelCase , _lowerCamelCase ) and feature.dtype == "binary":
_lowerCamelCase : List[str] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(_lowerCamelCase , _lowerCamelCase )
return None if batch_size is np.inf else batch_size
class A_ ( AbstractDatasetReader ):
def __init__( self: Optional[int] ,__lowerCAmelCase: NestedDataStructureLike[PathLike] ,__lowerCAmelCase: Optional[NamedSplit] = None ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: int ,):
'''simple docstring'''
super().__init__(
__lowerCAmelCase ,split=__lowerCAmelCase ,features=__lowerCAmelCase ,cache_dir=__lowerCAmelCase ,keep_in_memory=__lowerCAmelCase ,streaming=__lowerCAmelCase ,num_proc=__lowerCAmelCase ,**__lowerCAmelCase ,)
_lowerCamelCase : Tuple = path_or_paths if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else {self.split: path_or_paths}
_lowerCamelCase : Any = _PACKAGED_DATASETS_MODULES["parquet"][1]
_lowerCamelCase : int = Parquet(
cache_dir=__lowerCAmelCase ,data_files=__lowerCAmelCase ,features=__lowerCAmelCase ,hash=__lowerCAmelCase ,**__lowerCAmelCase ,)
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
if self.streaming:
_lowerCamelCase : List[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_lowerCamelCase : Tuple = None
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : str = None
self.builder.download_and_prepare(
download_config=__lowerCAmelCase ,download_mode=__lowerCAmelCase ,verification_mode=__lowerCAmelCase ,base_path=__lowerCAmelCase ,num_proc=self.num_proc ,)
_lowerCamelCase : Any = self.builder.as_dataset(
split=self.split ,verification_mode=__lowerCAmelCase ,in_memory=self.keep_in_memory )
return dataset
class A_ :
def __init__( self: str ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: Union[PathLike, BinaryIO] ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: List[Any] ,):
'''simple docstring'''
_lowerCamelCase : Any = dataset
_lowerCamelCase : Any = path_or_buf
_lowerCamelCase : Any = batch_size or get_writer_batch_size(dataset.features )
_lowerCamelCase : List[str] = parquet_writer_kwargs
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with open(self.path_or_buf ,"wb+" ) as buffer:
_lowerCamelCase : str = self._write(file_obj=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs )
else:
_lowerCamelCase : Optional[int] = self._write(file_obj=self.path_or_buf ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs )
return written
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: BinaryIO ,__lowerCAmelCase: int ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Optional[int] = parquet_writer_kwargs.pop("path_or_buf" ,__lowerCAmelCase )
_lowerCamelCase : List[str] = self.dataset.features.arrow_schema
_lowerCamelCase : str = pq.ParquetWriter(__lowerCAmelCase ,schema=__lowerCAmelCase ,**__lowerCAmelCase )
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,__lowerCAmelCase ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating parquet from Arrow format" ,):
_lowerCamelCase : List[str] = query_table(
table=self.dataset._data ,key=slice(__lowerCAmelCase ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,)
writer.write_table(__lowerCAmelCase )
written += batch.nbytes
writer.close()
        return written
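# Hedged usage sketch: upstream these are ParquetDatasetReader and
# ParquetDatasetWriter in datasets (the two classes above):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"]})
#   ParquetDatasetWriter(ds, "out.parquet").write()
#   ds2 = ParquetDatasetReader("out.parquet").read()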
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class A_ ( PretrainedConfig ):
lowerCAmelCase__ = '''roberta-prelayernorm'''
def __init__( self: Union[str, Any] ,__lowerCAmelCase: List[str]=50_265 ,__lowerCAmelCase: List[str]=768 ,__lowerCAmelCase: List[str]=12 ,__lowerCAmelCase: Optional[Any]=12 ,__lowerCAmelCase: Any=3_072 ,__lowerCAmelCase: int="gelu" ,__lowerCAmelCase: List[Any]=0.1 ,__lowerCAmelCase: Dict=0.1 ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: List[str]=1e-12 ,__lowerCAmelCase: Union[str, Any]=1 ,__lowerCAmelCase: Optional[Any]=0 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Dict="absolute" ,__lowerCAmelCase: Any=True ,__lowerCAmelCase: Dict=None ,**__lowerCAmelCase: Dict ,):
'''simple docstring'''
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
_lowerCamelCase : Dict = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : List[Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : str = hidden_act
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : Optional[int] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : Dict = max_position_embeddings
_lowerCamelCase : List[Any] = type_vocab_size
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : Dict = layer_norm_eps
_lowerCamelCase : List[str] = position_embedding_type
_lowerCamelCase : Dict = use_cache
_lowerCamelCase : Any = classifier_dropout
class A_ ( OnnxConfig ):
@property
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
_lowerCamelCase : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
_lowerCamelCase : List[str] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
            ] )
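# Hedged sketch: upstream this is RobertaPreLayerNormConfig in transformers;
# loading the checkpoint referenced in the map above would look like:
#
#   from transformers import RobertaPreLayerNormConfig
#   config = RobertaPreLayerNormConfig.from_pretrained("andreasmadsen/efficient_mlm_m0.40")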
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowerCamelCase_( _lowerCamelCase ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Optional[int] = tokenizer(example["content"] , truncation=_lowerCamelCase )["input_ids"]
_lowerCamelCase : Dict = len(example["content"] ) / len(output["input_ids"] )
return output
_lowerCAmelCase : Tuple = HfArgumentParser(PretokenizationArguments)
_lowerCAmelCase : Optional[int] = parser.parse_args()
if args.num_workers is None:
_lowerCAmelCase : Any = multiprocessing.cpu_count()
_lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
_lowerCAmelCase : Union[str, Any] = time.time()
_lowerCAmelCase : Optional[int] = load_dataset(args.dataset_name, split='''train''')
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
_lowerCAmelCase : Any = time.time()
_lowerCAmelCase : Dict = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
_lowerCAmelCase : str = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
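# Hypothetical invocation (the flag names come from PretokenizationArguments
# and are assumptions here):
#
#   python pretokenizing.py \
#       --tokenizer_dir codeparrot/codeparrot \
#       --dataset_name codeparrot/codeparrot-clean-train \
#       --tokenized_data_repo tokenized-codeparrot-train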
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCAmelCase : Optional[int] = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''gpt2''': 1024,
'''gpt2-medium''': 1024,
'''gpt2-large''': 1024,
'''gpt2-xl''': 1024,
'''distilgpt2''': 1024,
}
class GPTaTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        '''simple docstring'''
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
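# Aside: the conversation flattening in `_build_conversation_input_ids` above, written
# out standalone: encode each turn, append the EOS id, then truncate the history from
# the left so the most recent turns survive. The ids below are made up for illustration.
eos_token_id = 50_256
model_max_length = 8
encoded_turns = [[10, 11, 12], [20, 21], [30, 31, 32, 33]]

input_ids = []
for turn in encoded_turns:
    input_ids.extend(turn + [eos_token_id])
if len(input_ids) > model_max_length:
    input_ids = input_ids[-model_max_length:]
print(input_ids)  # only the last `model_max_length` ids remain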
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
        '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MCTCTForCTC''',
        '''MCTCTModel''',
        '''MCTCTPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
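# Aside: a minimal sketch of the lazy-import idea behind `_LazyModule` (a simplified
# stand-in, not the real transformers implementation): attribute access triggers the
# actual import, so heavy submodules load only when first used.
import importlib
import sys
from types import ModuleType


class LazyModule(ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)


sys.modules["lazy_json"] = LazyModule("lazy_json", {"json": ["dumps", "loads"]})
import lazy_json

print(lazy_json.dumps({"ok": True}))  # `json` is imported only on this first access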
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A_ :
def __init__( self: Tuple ,__lowerCAmelCase: Any ,__lowerCAmelCase: Any=13 ,__lowerCAmelCase: Optional[int]=32 ,__lowerCAmelCase: Optional[Any]=2 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: Optional[int]=16 ,__lowerCAmelCase: Dict=[1, 2, 1] ,__lowerCAmelCase: str=[2, 2, 4] ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: List[Any]=2.0 ,__lowerCAmelCase: str=True ,__lowerCAmelCase: List[str]=0.0 ,__lowerCAmelCase: Union[str, Any]=0.0 ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: List[str]="gelu" ,__lowerCAmelCase: Dict=False ,__lowerCAmelCase: str=True ,__lowerCAmelCase: Optional[int]=0.02 ,__lowerCAmelCase: Optional[Any]=1e-5 ,__lowerCAmelCase: Optional[int]=True ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: List[str]=10 ,__lowerCAmelCase: List[Any]=8 ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : Dict = image_size
_lowerCamelCase : int = patch_size
_lowerCamelCase : int = num_channels
_lowerCamelCase : int = embed_dim
_lowerCamelCase : Dict = depths
_lowerCamelCase : int = num_heads
_lowerCamelCase : List[str] = window_size
_lowerCamelCase : List[str] = mlp_ratio
_lowerCamelCase : List[Any] = qkv_bias
_lowerCamelCase : List[Any] = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = drop_path_rate
_lowerCamelCase : int = hidden_act
_lowerCamelCase : Optional[Any] = use_absolute_embeddings
_lowerCamelCase : List[str] = patch_norm
_lowerCamelCase : Optional[int] = layer_norm_eps
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : str = is_training
_lowerCamelCase : Optional[int] = scope
_lowerCamelCase : int = use_labels
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : int = encoder_stride
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[str] = None
if self.use_labels:
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : Dict = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def _lowercase ( self: Dict ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = SwinvaModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCamelCase : Union[str, Any] = model(UpperCAmelCase_ )
_lowerCamelCase : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCamelCase : str = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def _lowercase ( self: Tuple ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict = SwinvaForMaskedImageModeling(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCamelCase : Tuple = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : Dict = 1
_lowerCamelCase : List[str] = SwinvaForMaskedImageModeling(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Dict = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.type_sequence_label_size
_lowerCamelCase : Dict = SwinvaForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCamelCase : Dict = model(UpperCAmelCase_ ,labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = self.prepare_config_and_inputs()
_lowerCamelCase : List[str] = config_and_inputs
_lowerCamelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ ( snake_case__ , snake_case__ , unittest.TestCase ):
lowerCAmelCase__ = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCAmelCase__ = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = SwinvaModelTester(self )
_lowerCamelCase : Union[str, Any] = ConfigTester(self ,config_class=UpperCAmelCase_ ,embed_dim=37 )
def _lowercase ( self: Dict ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
def _lowercase ( self: Dict ):
'''simple docstring'''
pass
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = model_class(UpperCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCamelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ ,nn.Linear ) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(UpperCAmelCase_ )
_lowerCamelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Dict = [*signature.parameters.keys()]
_lowerCamelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,UpperCAmelCase_ )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[int] = True
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Any = False
_lowerCamelCase : Tuple = True
_lowerCamelCase : str = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
_lowerCamelCase : Dict = model(**self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) )
_lowerCamelCase : Union[str, Any] = outputs.attentions
_lowerCamelCase : Optional[int] = len(self.model_tester.depths )
self.assertEqual(len(UpperCAmelCase_ ) ,UpperCAmelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCamelCase : Tuple = True
_lowerCamelCase : List[str] = config.window_size**2
_lowerCamelCase : Union[str, Any] = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) )
_lowerCamelCase : Dict = outputs.attentions
self.assertEqual(len(UpperCAmelCase_ ) ,UpperCAmelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
_lowerCamelCase : List[str] = len(UpperCAmelCase_ )
# Check attention is always last and order is fine
_lowerCamelCase : Tuple = True
_lowerCamelCase : Dict = True
_lowerCamelCase : Dict = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) )
if hasattr(self.model_tester ,"num_hidden_states_types" ):
_lowerCamelCase : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_lowerCamelCase : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states ,len(UpperCAmelCase_ ) )
_lowerCamelCase : List[str] = outputs.attentions
self.assertEqual(len(UpperCAmelCase_ ) ,UpperCAmelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) )
_lowerCamelCase : Dict = outputs.hidden_states
_lowerCamelCase : Dict = getattr(
self.model_tester ,"expected_num_hidden_layers" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase_ ) ,UpperCAmelCase_ )
# Swinv2 has a different seq_length
_lowerCamelCase : str = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
_lowerCamelCase : str = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase_ ) ,UpperCAmelCase_ )
_lowerCamelCase : int = reshaped_hidden_states[0].shape
_lowerCamelCase : Dict = (
reshaped_hidden_states[0].view(UpperCAmelCase_ ,UpperCAmelCase_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = True
self.check_hidden_states_output(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Union[str, Any] = True
self.check_hidden_states_output(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : str = 3
_lowerCamelCase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCamelCase : int = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : List[str] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCamelCase : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = True
self.check_hidden_states_output(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,(padded_height, padded_width) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_ )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@slow
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : int = SwinvaModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Any = _config_zero_init(UpperCAmelCase_ )
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(config=UpperCAmelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" )
if is_vision_available()
else None
)
@slow
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to(
UpperCAmelCase_ )
_lowerCamelCase : List[str] = self.default_image_processor
_lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowerCamelCase : Any = image_processor(images=UpperCAmelCase_ ,return_tensors="pt" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**UpperCAmelCase_ )
# verify the logits
_lowerCamelCase : Any = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,UpperCAmelCase_ )
_lowerCamelCase : str = torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(UpperCAmelCase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,UpperCAmelCase_ ,atol=1e-4 ) )
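# Aside: the shape arithmetic checked in `create_and_check_model` above, written out
# with this tester's defaults. Swin merges patches 2x2 at each of the len(depths) - 1
# stage boundaries, quartering the sequence length and doubling the embedding width.
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
num_patches = (image_size // patch_size) ** 2               # 256
expected_seq_len = num_patches // (4 ** (len(depths) - 1))  # 16
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))      # 64
print(expected_seq_len, expected_dim)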
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig( PretrainedConfig ):
    model_type = 'masked_bert'

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
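# Aside: a hedged sketch of the "topK" idea named by `pruning_method`: keep the
# highest-scoring fraction of entries and zero the rest. The real project prunes with
# learned importance scores; plain weight magnitudes are used here only to keep the
# example self-contained.
import torch


def topk_mask(scores: torch.Tensor, keep_ratio: float) -> torch.Tensor:
    k = max(1, int(keep_ratio * scores.numel()))
    threshold = scores.flatten().kthvalue(scores.numel() - k + 1).values
    return (scores >= threshold).to(scores.dtype)


weights = torch.randn(4, 4)
print(topk_mask(weights.abs(), keep_ratio=0.25).sum())  # tensor(4.)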
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class IBertConfig( PretrainedConfig ):
    model_type = """ibert"""

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig( OnnxConfig ):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
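# Aside: `quant_mode` toggles I-BERT's integer-only arithmetic. A symmetric
# fake-quantization sketch that conveys the idea (an illustration only, not I-BERT's
# actual quantized kernels):
import torch


def fake_quantize(x: torch.Tensor, num_bits: int = 8) -> torch.Tensor:
    qmax = 2 ** (num_bits - 1) - 1
    scale = x.abs().max().clamp(min=1e-8) / qmax
    return torch.round(x / scale).clamp(-qmax - 1, qmax) * scale


x = torch.randn(3)
print(x, fake_quantize(x))  # values snap to one of 2**num_bits evenly spaced levels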
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = '''0.12'''  # assumed parallelism: 8
if is_torch_available():
import torch
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) -> List[Any]:
'''simple docstring'''
if rng is None:
_lowerCamelCase : Union[str, Any] = random.Random()
_lowerCamelCase : Union[str, Any] = 1
for dim in shape:
total_dims *= dim
_lowerCamelCase : Optional[int] = []
for _ in range(_lowerCamelCase ):
values.append(rng.randint(0 , vocab_size - 1 ) )
_lowerCamelCase : Union[str, Any] = np.array(_lowerCamelCase , dtype=jnp.intaa ).reshape(_lowerCamelCase )
return output
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = ids_tensor(_lowerCamelCase , vocab_size=2 , rng=_lowerCamelCase )
# make sure that at least one token is attended to for each batch
_lowerCamelCase : List[str] = 1
return attn_mask
@require_flax
class A_ :
lowerCAmelCase__ = None
lowerCAmelCase__ = ()
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
_lowerCamelCase : List[str] = 2
_lowerCamelCase : str = inputs["input_ids"].shape[-1] // 2
_lowerCamelCase : Tuple = inputs["input_ids"][:max_batch_size, :sequence_length]
_lowerCamelCase : Any = jnp.ones_like(__lowerCAmelCase )
_lowerCamelCase : List[Any] = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
_lowerCamelCase : Optional[Any] = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
_lowerCamelCase : List[str] = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = self._get_input_ids_and_config()
_lowerCamelCase : List[Any] = False
_lowerCamelCase : Dict = max_length
_lowerCamelCase : Tuple = 0
for model_class in self.all_generative_model_classes:
_lowerCamelCase : str = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
_lowerCamelCase : Any = getattr(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Dict = pt_model_class(__lowerCAmelCase ).eval()
_lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCAmelCase ,flax_model.params )
_lowerCamelCase : int = flax_model.generate(__lowerCAmelCase ).sequences
_lowerCamelCase : Optional[int] = pt_model.generate(torch.tensor(__lowerCAmelCase ,dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_lowerCamelCase : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = self._get_input_ids_and_config()
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase )
_lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Dict = jit(model.generate )
_lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config()
_lowerCamelCase : List[Any] = True
_lowerCamelCase : Optional[int] = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Dict = jit(model.generate )
_lowerCamelCase : int = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config()
_lowerCamelCase : int = False
_lowerCamelCase : Optional[Any] = max_length
_lowerCamelCase : Dict = 2
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
_lowerCamelCase : Dict = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Tuple = jit(model.generate )
_lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = self._get_input_ids_and_config()
_lowerCamelCase : Tuple = False
_lowerCamelCase : Union[str, Any] = max_length
_lowerCamelCase : List[str] = 2
_lowerCamelCase : Optional[int] = 2
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : str = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config()
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = max_length
_lowerCamelCase : Optional[Any] = 0.8
_lowerCamelCase : Union[str, Any] = 10
_lowerCamelCase : List[str] = 0.3
_lowerCamelCase : Tuple = 1
_lowerCamelCase : Any = 8
_lowerCamelCase : str = 9
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase )
_lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : int = jit(model.generate )
_lowerCamelCase : Optional[int] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = self._get_input_ids_and_config()
_lowerCamelCase : List[str] = max_length
_lowerCamelCase : Tuple = 1
_lowerCamelCase : Any = 8
_lowerCamelCase : Dict = 9
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Any = model_class(__lowerCAmelCase )
_lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Any = jit(model.generate )
_lowerCamelCase : Any = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config()
_lowerCamelCase : Dict = max_length
_lowerCamelCase : List[Any] = 2
_lowerCamelCase : Tuple = 1
_lowerCamelCase : List[str] = 8
_lowerCamelCase : List[Any] = 9
for model_class in self.all_generative_model_classes:
_lowerCamelCase : int = model_class(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Tuple = jit(model.generate )
_lowerCamelCase : Optional[Any] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCamelCase : Tuple = attention_mask.at[(0, 0)].set(0 )
_lowerCamelCase : Dict = False
_lowerCamelCase : Any = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Any = jit(model.generate )
_lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCamelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 )
_lowerCamelCase : List[str] = True
_lowerCamelCase : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Any = jit(model.generate )
_lowerCamelCase : List[Any] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCamelCase : List[str] = attention_mask.at[(0, 0)].set(0 )
_lowerCamelCase : int = 2
_lowerCamelCase : int = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : int = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Dict = jit(model.generate )
_lowerCamelCase : Dict = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
@require_flax
class A_ ( unittest.TestCase ):
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
_lowerCamelCase : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
_lowerCamelCase : Optional[Any] = "Hello world"
_lowerCamelCase : str = tokenizer(__lowerCAmelCase ,return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__lowerCAmelCase ,"do_samples" ):
model.generate(__lowerCAmelCase ,do_samples=__lowerCAmelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__lowerCAmelCase ,"foo" ):
_lowerCamelCase : List[str] = {"foo": "bar"}
            model.generate(__lowerCAmelCase ,**__lowerCAmelCase )
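# Aside: the left-padding trick used by the attention-mask tests above, in plain
# NumPy: zeroing position (0, 0) marks the first token of the first sequence as
# padding, which generation must then ignore.
import numpy as np

attention_mask = np.ones((2, 5), dtype=np.int32)
attention_mask[0, 0] = 0  # same effect as `attention_mask.at[(0, 0)].set(0)` in JAX
print(attention_mask)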
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings) -> bool:
    '''simple docstring'''
    attribute_used = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
                attribute_used = True
# Deal with multi-line cases
elif (
re.search(
RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , UpperCamelCase__ , )
is not None
):
                attribute_used = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
                    attribute_used = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
_lowerCamelCase : Optional[Any] = ["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
    case_allowed = True
if not attribute_used:
        case_allowed = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
elif attribute.endswith("_token_id" ):
                case_allowed = True
# configuration class specific cases
if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
                case_allowed = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def check_config_attributes_being_used(config_class) -> list:
    '''simple docstring'''
    signature = dict(inspect.signature(config_class.__init__ ).parameters )
    parameter_names = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
    reversed_attribute_map = {}
if len(config_class.attribute_map ) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class )
    model_dir = os.path.dirname(config_source_file )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir , fn ) for fn in os.listdir(model_dir ) if fn.startswith("modeling_" )]
# Get the source code strings
    modeling_sources = []
for path in modeling_paths:
        if os.path.isfile(path ):
            with open(path ) as fp:
                modeling_sources.append(fp.read() )
    unused_attributes = []
    for config_param, default_value in zip(parameter_names , parameter_defaults ):
# `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
        if not check_attribute_being_used(config_class , attributes , default_value , modeling_sources ):
            unused_attributes.append(attributes[0] )
    return sorted(unused_attributes )
def check_config_attributes() -> None:
    '''simple docstring'''
    configs_with_unused_attributes = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class ) ,
                lambda x: inspect.isclass(x )
                and issubclass(x , PretrainedConfig )
                and inspect.getmodule(x ) == inspect.getmodule(_config_class ) ,
            )
        ]
for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class )
            if len(unused_attributes ) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes ) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
        raise ValueError(error )
if __name__ == "__main__":
    check_config_attributes()
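# Aside: the multi-line `getattr` regex from check_attribute_being_used, exercised on a
# snippet that the plain substring checks would miss because of the line breaks:
import re

attribute = "hidden_size"
source = 'value = getattr(\n    self.config,\n    "hidden_size",\n    None,\n)'
pattern = rf"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\""
print(re.search(pattern, source) is not None)  # True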
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config( PretrainedConfig ):
    model_type = 'mobilenet_v1'

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.9_99, initializer_range=0.02, layer_norm_eps=0.0_01, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )

    @property
    def inputs(self):
        '''simple docstring'''
        return OrderedDict([("pixel_values", {0: "batch"})] )

    @property
    def outputs(self):
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )

    @property
    def atol_for_validation(self):
        '''simple docstring'''
        return 1e-4
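# Aside: a hedged sketch of the standard MobileNet channel-scaling rule that
# `depth_multiplier` and `min_depth` parameterize (the rule itself lives in the
# modeling code, not in this configuration class):
def scaled_channels(channels: int, depth_multiplier: float, min_depth: int = 8, divisor: int = 8) -> int:
    return max(min_depth, int(channels * depth_multiplier + divisor / 2) // divisor * divisor)


print(scaled_channels(64, 1.0))   # 64
print(scaled_channels(64, 0.75))  # 48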
"""simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    '''simple docstring'''
    if isinstance(num, float):
        raise TypeError("int() can\'t convert non-string with explicit base" )
    if num < 0:
        raise ValueError("parameter must be positive int" )
    if isinstance(base, str):
        raise TypeError("\'str\' object cannot be interpreted as an integer" )
    if isinstance(base, float):
        raise TypeError("\'float\' object cannot be interpreted as an integer" )
    if base in (0, 1):
        raise ValueError("base must be >= 2" )
    if base > 36:
        raise ValueError("base must be <= 36" )
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1] )
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1] )
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
            )
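# A few spot checks of decimal_to_any beyond the exhaustive loop above:
print(decimal_to_any(255, 16))             # FF
print(decimal_to_any(255, 2))              # 11111111
print(int(decimal_to_any(4_321, 36), 36))  # 4321, round-trips through base 36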
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    '''simple docstring'''
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device


def show_pil(img):
    '''simple docstring'''
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
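# Aside: exercising the freezing helper above on a small module.
import torch

layer = torch.nn.Linear(4, 4)
freeze_module(layer)
print(all(not p.requires_grad for p in layer.parameters()))  # True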
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace) -> BaseTransformersCLICommand:
    '''simple docstring'''
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )


class AddNewModelCommand( BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand(parser: ArgumentParser ):
        '''simple docstring'''
        add_new_model_parser = parser.add_parser("add-new-model" )
        add_new_model_parser.add_argument("--testing" ,action="store_true" ,help="If in testing mode." )
        add_new_model_parser.add_argument("--testing_file" ,type=str ,help="Configuration file on which to run." )
        add_new_model_parser.add_argument(
            "--path" ,type=str ,help="Path to cookiecutter. Should only be used for testing purposes." )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__(self ,testing: bool ,testing_file: str ,path=None ,*args ):
        '''simple docstring'''
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self ):
        '''simple docstring'''
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file ,"r" ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=True ,extra_context=testing_configuration ,)
        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" ,"r" ) as configuration_file:
_lowerCamelCase : Any = json.load(_a )
_lowerCamelCase : Tuple = configuration["lowercase_modelname"]
_lowerCamelCase : Optional[Any] = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(F"""{directory}/configuration.json""" )
_lowerCamelCase : List[Any] = "PyTorch" in generate_tensorflow_pytorch_and_flax
_lowerCamelCase : int = "TensorFlow" in generate_tensorflow_pytorch_and_flax
_lowerCamelCase : List[Any] = "Flax" in generate_tensorflow_pytorch_and_flax
_lowerCamelCase : Tuple = F"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(_a ,exist_ok=_a )
os.makedirs(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" ,exist_ok=_a )
# Tests require submodules as they have parent imports
with open(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" ,"w" ):
pass
shutil.move(
F"""{directory}/__init__.py""" ,F"""{model_dir}/__init__.py""" ,)
shutil.move(
F"""{directory}/configuration_{lowercase_model_name}.py""" ,F"""{model_dir}/configuration_{lowercase_model_name}.py""" ,)
        def remove_copy_lines(path: str ):
            with open(path ,"r" ) as f:
                lines = f.readlines()
            with open(path ,"w" ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_{lowercase_model_name}.py""" ,F"""{model_dir}/modeling_{lowercase_model_name}.py""" ,)
shutil.move(
F"""{directory}/test_modeling_{lowercase_model_name}.py""" ,F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" ,)
else:
os.remove(F"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_tf_{lowercase_model_name}.py""" ,F"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" ,)
shutil.move(
F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" ,F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" ,)
else:
os.remove(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_flax_{lowercase_model_name}.py""" ,F"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" ,)
shutil.move(
F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" ,F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" ,)
else:
os.remove(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/{lowercase_model_name}.md""" ,F"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" ,)
shutil.move(
F"""{directory}/tokenization_{lowercase_model_name}.py""" ,F"""{model_dir}/tokenization_{lowercase_model_name}.py""" ,)
shutil.move(
F"""{directory}/tokenization_fast_{lowercase_model_name}.py""" ,F"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str ,line_to_copy_below: str ,lines_to_copy: list ):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh ,"w" ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(F"""Line {line_to_copy_below} was not found in file.""" )
            # Copy the file permissions from the old file to the new file
            copymode(original_file ,abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path ,original_file )
        def skip_units(line: str ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile: str ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split("\"" )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split("\"" )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in ,line_to_copy_below ,lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )
replace_in_files(F"""{directory}/to_replace_{lowercase_model_name}.py""" )
        os.rmdir(directory )
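# A minimal usage sketch (hedged: assumes an argparse parser wired up exactly as
# `register_subcommand` above expects; the real CLI entry point lives elsewhere):
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   subparsers = parser.add_subparsers()
#   AddNewModelCommand.register_subcommand(subparsers)
#   args = parser.parse_args(["add-new-model", "--testing", "--testing_file", "cfg.json"])
#   args.func(args).run()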
"""simple docstring"""
def alternative_string_arrange( first_str: str ,second_str: str ) -> str:
    '''simple docstring'''
    first_str_length : int = len(first_str )
    second_str_length : int = len(second_str )
    abs_length : int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list : list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''') | 340 | 0 |
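# Worked example (illustrating the function above): characters are interleaved
# one by one, then the tail of the longer string is appended.
# alternative_string_arrange("AB", "XYZ") -> "AXBYZ"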
"""simple docstring"""
import argparse
import hashlib  # hashlib is only used for the self-test below
import struct
class SHA1Hash:
    """simple docstring"""
    def __init__( self ,data: bytes ):
        '''simple docstring'''
        self.data = data
        self.h = [0x6745_2301, 0xEFCD_AB89, 0x98BA_DCFE, 0x1032_5476, 0xC3D2_E1F0]
    @staticmethod
    def rotate( n: int ,b: int ):
        '''simple docstring'''
        # left-rotate a 32-bit integer n by b bits
        return ((n << b) | (n >> (32 - b))) & 0xFFFF_FFFF
    def padding( self ):
        '''simple docstring'''
        # pad the message to a multiple of 64 bytes and append the bit length
        padding = b"\x80" + b"\x00" * (63 - (len(self.data ) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q" ,8 * len(self.data ) )
        return padded_data
    def split_blocks( self ):
        '''simple docstring'''
        # split the padded message into 64-byte (512-bit) blocks
        return [
            self.padded_data[i : i + 64] for i in range(0 ,len(self.padded_data ) ,64 )
        ]
    def expand_block( self ,block: bytes ):
        '''simple docstring'''
        # expand a 64-byte block into the 80-word SHA-1 message schedule
        w = list(struct.unpack(">16L" ,block ) ) + [0] * 64
        for i in range(16 ,80 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) ,1 )
        return w
    def final_hash( self ):
        '''simple docstring'''
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a, b, c, d, e = self.h
            for i in range(0 ,80 ):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A82_7999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9_EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1B_BCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62_C1D6
                a, b, c, d, e = (
                    self.rotate(a ,5 ) + f + e + k + expanded_block[i] & 0xFFFF_FFFF,
                    a,
                    self.rotate(b ,30 ),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFF_FFFF,
                self.h[1] + b & 0xFFFF_FFFF,
                self.h[2] + c & 0xFFFF_FFFF,
                self.h[3] + d & 0xFFFF_FFFF,
                self.h[4] + e & 0xFFFF_FFFF,
            )
return ("{:08x}" * 5).format(*self.h )
def test_sha1_hash( ) -> None:
    '''simple docstring'''
    testing_string = b"Test String"
    assert SHA1Hash(testing_string ).final_hash() == hashlib.sha1(testing_string ).hexdigest()  # noqa: S324
def main( ) -> None:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Process some strings or files" )
    parser.add_argument(
        "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
    parser.add_argument("--file" , dest="input_file" , help="Hash contents of a file" )
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , "rb" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , "utf-8" )
    print(SHA1Hash(hash_input ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod() | 370 |
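# Known-answer check (a standard SHA-1 test vector, stated here for reference):
# SHA1Hash(b"abc").final_hash() == "a9993e364706816aba3e25717850c26c9cd0d89d"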
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs( graph ,s ,t ,parent ) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut( graph ,source ,sink ) -> list:
    '''simple docstring'''
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph ,source ,sink ,parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5)) | 340 | 0 |
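# For the sample graph above (the classic CLRS max-flow instance, max flow 23),
# the saturated edges crossing the minimum cut are expected to be printed as:
#   [(1, 3), (4, 3), (4, 5)]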
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path ,albert_config_file ,pytorch_dump_path ) -> None:
    '''simple docstring'''
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = AlbertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model ,config ,tf_checkpoint_path )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path) | 371 |
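# A typical invocation (hedged: the script name and paths are placeholders, not
# files shipped with this snippet):
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin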
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig( PretrainedConfig ):
    model_type = 'camembert'
    def __init__( self ,vocab_size=30_522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,position_embedding_type="absolute" ,use_cache=True ,classifier_dropout=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] ) | 340 | 0 |
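# A minimal usage sketch (hedged: assumes `transformers` is installed and the
# classes above are importable as written):
#
#   config = CamembertConfig(num_hidden_layers=6)
#   print(config.model_type)    # "camembert"
#   print(config.hidden_size)   # 768, the default above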
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D( nn.Module ):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        self.conv = nn.Conv(
            self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
    def __call__( self ,hidden_states ):
        '''simple docstring'''
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states ,shape=(batch, height * 2, width * 2, channels) ,method="nearest" ,)
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxDownsample2D( nn.Module ):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        self.conv = nn.Conv(
            self.out_channels ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
    def __call__( self ,hidden_states ):
        '''simple docstring'''
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxResnetBlock2D( nn.Module ):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32 ,epsilon=1e-5 )
        self.conv1 = nn.Conv(
            out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
        self.time_emb_proj = nn.Dense(out_channels ,dtype=self.dtype )
        self.norm2 = nn.GroupNorm(num_groups=32 ,epsilon=1e-5 )
        self.dropout = nn.Dropout(self.dropout_prob )
        self.conv2 = nn.Conv(
            out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels ,kernel_size=(1, 1) ,strides=(1, 1) ,padding="VALID" ,dtype=self.dtype ,)
    def __call__( self ,hidden_states ,temb ,deterministic=True ):
        '''simple docstring'''
        residual = hidden_states
        hidden_states = self.norm1(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.conv1(hidden_states )
        temb = self.time_emb_proj(nn.swish(temb ) )
        temb = jnp.expand_dims(jnp.expand_dims(temb ,1 ) ,1 )
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.dropout(hidden_states ,deterministic )
        hidden_states = self.conv2(hidden_states )
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual )
        return hidden_states + residual
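# A minimal usage sketch (hedged: shapes and channel counts are illustrative
# assumptions, not fixed by the classes above). Inputs use NHWC layout.
if __name__ == "__main__":
    block = FlaxUpsample2D(out_channels=8 )
    x = jnp.ones((1, 16, 16, 8) )
    params = block.init(jax.random.PRNGKey(0 ) ,x )
    out = block.apply(params ,x )  # nearest-neighbor upsample + conv -> (1, 32, 32, 8)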
"""simple docstring"""
from collections import defaultdict
def dfs( start: int ) -> int:
    '''simple docstring'''
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree( ) -> None:
    '''simple docstring'''
    dfs(1 )
if __name__ == "__main__":
    n , m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1) | 340 | 0 |
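# For the sample tree above (the classic "Even Tree" instance), cutting the
# edges above nodes 3 and 6 leaves only even-sized components, so this prints 2.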
"""simple docstring"""
import socket
def main( ) -> None:
    '''simple docstring'''
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port) )
    sock.send(b"Hello server!" )
    with open("Received_file" , "wb" ) as out_file:
        print("File opened" )
        print("Receiving data..." )
        while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data )
print("Successfully received the file" )
sock.close()
print("Connection closed" )
if __name__ == "__main__":
main() | 351 |
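# A minimal counterpart server sketch (hedged: an assumption for illustration,
# not part of the original file). It binds the same port, accepts one client,
# and streams a file back until EOF:
#
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((socket.gethostname(), 12312))
#   server.listen(1)
#   conn, _addr = server.accept()
#   with open("file_to_send", "rb") as f:
#       while chunk := f.read(1024):
#           conn.send(chunk)
#   conn.close()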
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = '''__DUMMY_TRANSFORMERS_USER__'''
CI_HUB_USER_FULL_NAME = '''Dummy User'''
CI_HUB_USER_TOKEN = '''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'''
CI_HUB_ENDPOINT = '''https://hub-ci.huggingface.co'''
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
CI_HUB_TOKEN_PATH = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url( monkeypatch ) -> None:
    '''simple docstring'''
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE )
@pytest.fixture
def ci_hub_config( monkeypatch ) -> None:
    '''simple docstring'''
    monkeypatch.setattr("datasets.config.HF_ENDPOINT" , CI_HUB_ENDPOINT )
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , CI_HUB_DATASETS_URL )
@pytest.fixture
def ci_hub_token_path( monkeypatch ) -> None:
    '''simple docstring'''
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , CI_HUB_TOKEN_PATH )
@pytest.fixture
def set_ci_hub_access_token( ci_hub_config , ci_hub_token_path ):
    '''simple docstring'''
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="session" )
def hf_api( ):
    '''simple docstring'''
    return HfApi(endpoint=CI_HUB_ENDPOINT )
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Dict = HfFolder.get_token()
HfFolder.save_token(_lowerCamelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
def _cleanup_repo(_lowerCamelCase ):
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
return _cleanup_repo
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> List[str]:
'''simple docstring'''
@contextmanager
def _temporary_repo(_lowerCamelCase ):
try:
yield repo_id
finally:
cleanup_repo(_lowerCamelCase )
return _temporary_repo
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : Tuple = F"""repo_txt_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : List[str] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : Dict = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Any = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : List[Any] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_ | 340 | 0 |
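# A minimal test sketch using these fixtures (hedged: illustrative only, and it
# assumes this file is collected as a pytest conftest so fixtures resolve):
#
#   def test_private_repo_roundtrip(hf_private_dataset_repo_txt_data, hf_api, hf_token):
#       # the fixture yields a repo_id like "__DUMMY_TRANSFORMERS_USER__/repo_txt_data-..."
#       info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
#       assert info.id == hf_private_dataset_repo_txt_data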
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin( TFGenerationMixin ):
    # warning at import time
    warnings.warn(
        'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
        'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.' , FutureWarning , )
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class MultiControlNetModel( ModelMixin ):
    def __init__( self ,controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
        '''simple docstring'''
        super().__init__()
        self.nets = nn.ModuleList(controlnets )
    def forward( self ,sample: torch.FloatTensor ,timestep: Union[torch.Tensor, float, int] ,encoder_hidden_states: torch.Tensor ,controlnet_cond: List[torch.tensor] ,conditioning_scale: List[float] ,class_labels: Optional[torch.Tensor] = None ,timestep_cond: Optional[torch.Tensor] = None ,attention_mask: Optional[torch.Tensor] = None ,cross_attention_kwargs: Optional[Dict[str, Any]] = None ,guess_mode: bool = False ,return_dict: bool = True ,) -> Union[ControlNetOutput, Tuple]:
        '''simple docstring'''
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond ,conditioning_scale ,self.nets ) ):
            down_samples, mid_sample = controlnet(
                sample=sample ,timestep=timestep ,encoder_hidden_states=encoder_hidden_states ,controlnet_cond=image ,conditioning_scale=scale ,class_labels=class_labels ,timestep_cond=timestep_cond ,attention_mask=attention_mask ,cross_attention_kwargs=cross_attention_kwargs ,guess_mode=guess_mode ,return_dict=return_dict ,)
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples ,down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def save_pretrained( self ,save_directory: Union[str, os.PathLike] ,is_main_process: bool = True ,save_function: Callable = None ,safe_serialization: bool = False ,variant: Optional[str] = None ,):
        '''simple docstring'''
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save ,is_main_process=is_main_process ,save_function=save_function ,safe_serialization=safe_serialization ,variant=variant ,)
            idx += 1
            model_path_to_save = model_path_to_save + F"""_{idx}"""
    @classmethod
    def from_pretrained( cls ,pretrained_model_path: Optional[Union[str, os.PathLike]] ,**kwargs ):
        '''simple docstring'''
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load ,**kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + F"""_{idx}"""
        logger.info(F"""{len(controlnets )} controlnets loaded from {pretrained_model_path}.""" )
        if len(controlnets ) == 0:
            raise ValueError(
                F"""No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + '_0'}.""" )
        return cls(controlnets )
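# A minimal usage sketch (hedged: model ids and paths are placeholders, and
# loading real ControlNets requires the corresponding weights to be available):
#
#   nets = MultiControlNetModel([
#       ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny"),
#       ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth"),
#   ])
#   nets.save_pretrained("./my_multi_controlnet")  # writes ./my_multi_controlnet, ./my_multi_controlnet_1
#   nets = MultiControlNetModel.from_pretrained("./my_multi_controlnet")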
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling( arr ,size ,stride ) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling( arr ,size ,stride ) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() | 353 |
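# Worked example (a small hand-checkable case for the functions above):
#
#   maxpooling([[1, 2, 3, 4],
#               [5, 6, 7, 8],
#               [9, 10, 11, 12],
#               [13, 14, 15, 16]], size=2, stride=2)
#   -> [[ 6.,  8.],
#       [14., 16.]]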
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : int = logging.get_logger(__name__)
def rename_keys( state_dict ,encoder_only=False ) -> OrderedDict:
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head" ):
            key = "segformer.encoder." + key
        if key.startswith("backbone" ):
            key = key.replace("backbone" , "segformer.encoder" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed" ) + len("patch_embed" )]
            key = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(idx )-1}""" )
        if "norm" in key:
            key = key.replace("norm" , "layer_norm" )
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
            key = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(idx )-1}""" )
        if "layer_norm1" in key:
            key = key.replace("layer_norm1" , "layer_norm_1" )
        if "layer_norm2" in key:
            key = key.replace("layer_norm2" , "layer_norm_2" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block" ) + len("block" )]
            key = key.replace(F"""block{idx}""" , F"""block.{int(idx )-1}""" )
        if "attn.q" in key:
            key = key.replace("attn.q" , "attention.self.query" )
        if "attn.proj" in key:
            key = key.replace("attn.proj" , "attention.output.dense" )
        if "attn" in key:
            key = key.replace("attn" , "attention.self" )
        if "fc1" in key:
            key = key.replace("fc1" , "dense1" )
        if "fc2" in key:
            key = key.replace("fc2" , "dense2" )
        if "linear_pred" in key:
            key = key.replace("linear_pred" , "classifier" )
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv" , "linear_fuse" )
            key = key.replace("linear_fuse.bn" , "batch_norm" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c" ) + len("linear_c" )]
            key = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(idx )-1}""" )
        if key.startswith("head" ):
            key = key.replace("head" , "classifier" )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v( state_dict ,config ) -> None:
    '''simple docstring'''
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
            kv_bias = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
            # next, add keys and values (in that order) to the state dict
            state_dict[F"""segformer.encoder.block.{i}.{j}.attention.self.key.weight"""] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F"""segformer.encoder.block.{i}.{j}.attention.self.key.bias"""] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F"""segformer.encoder.block.{i}.{j}.attention.self.value.weight"""] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F"""segformer.encoder.block.{i}.{j}.attention.self.value.bias"""] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img( ) -> Image.Image:
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_segformer_checkpoint( model_name ,checkpoint_path ,pytorch_dump_folder_path ) -> None:
    '''simple docstring'''
    config = SegformerConfig()
    encoder_only = False
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer." ) : len("segformer." ) + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(F"""Model {model_name} not supported""" )
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(F"""Model {model_name} not supported""" )
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(F"""Size {size} not supported""" )
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512) , keep_ratio=False , align=False , do_random_crop=False )
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values
logger.info(F"""Converting model {model_name}...""" )
# load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path , map_location=torch.device("cpu" ) )
    else:
        state_dict = torch.load(checkpoint_path , map_location=torch.device("cpu" ) )["state_dict"]
    # rename keys
    state_dict = rename_keys(state_dict , encoder_only=encoder_only )
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config )
    else:
        model = SegformerForSemanticSegmentation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.13_72e01, -1.27_87e01, -1.34_77e01],
[-1.25_36e01, -1.41_94e01, -1.44_09e01],
[-1.32_17e01, -1.48_88e01, -1.53_27e01],
],
[
[-1.47_91e01, -1.71_22e01, -1.82_77e01],
[-1.71_63e01, -1.91_92e01, -1.95_33e01],
[-1.78_97e01, -1.99_91e01, -2.03_15e01],
],
[
[7.67_23e-01, 4.19_21e-01, -7.78_78e-02],
[4.77_72e-01, 9.55_57e-03, -2.80_82e-01],
[3.60_32e-01, -2.48_26e-01, -5.11_68e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
else:
        predicted_class_idx = logits.argmax(-1 ).item()
        print("Predicted class:" , model.config.id2label[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-2 )
# finally, save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 340 | 0 |
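# A typical invocation (hedged: the script name and checkpoint path are
# placeholders; the original .pth weights must be obtained separately):
#
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0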
"""simple docstring"""
from math import factorial
def solution( n = 20 ) -> int:
    '''simple docstring'''
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number.''') | 354 |
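# Sanity check: the formula above is the central binomial coefficient C(2n, n),
# so solution(20) == 137846528820, the number of lattice paths through a 20x20 grid.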
"""simple docstring"""
cache: dict[tuple[int, int, int], int] = {}
def _calculate( days ,absent ,late ) -> int:
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
return prizestrings
def solution( days = 30 ) -> int:
    '''simple docstring'''
    return _calculate(days , absent=0 , late=0 )
if __name__ == "__main__":
print(solution()) | 340 | 0 |
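# Sanity check (hedged: this is the Project Euler 191 "prize strings" structure,
# with the absent/late roles swapped relative to the problem statement): it
# counts 30-day strings with no 3 consecutive "late" days and at most one
# "absent" day; the expected value is 1918080160.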
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = '\\n Text data.\n Second line of data.'
FILE_PATH = 'file'
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : int = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_lowerCamelCase : str = bytes(_A , "utf-8" )
with zstd.open(_A , "wb" ) as f:
f.write(_A )
return path
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Any:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , _A ) , "w" ) as f:
f.write(_A )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def test_cached_path_extract( compression_format ,gz_file ,xz_file ,zstd_path ,tmp_path ,text_file ) -> None:
    '''simple docstring'''
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def test_extracted_datasets_path( default_extracted ,default_cache_dir ,xz_file ,tmp_path ,monkeypatch ) -> None:
    '''simple docstring'''
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , custom_extracted_dir )
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local( text_file ) -> None:
    '''simple docstring'''
    # absolute path
    full_path = str(Path(text_file ).resolve() )
    assert cached_path(full_path ) == text_file
    # relative path
    relative_path = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(relative_path ) == text_file
def test_cached_path_missing_local( tmp_path ) -> None:
    '''simple docstring'''
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec( tmpfs_file ) -> None:
    '''simple docstring'''
    output_path = get_from_cache(F"""tmp://{tmpfs_file}""" )
    with open(output_path ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _A )
def lowerCamelCase_( ) -> List[str]:
'''simple docstring'''
with pytest.raises(_A ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _A )
def lowerCamelCase_( _lowerCamelCase ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : int = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_A ):
http_get("https://huggingface.co" , temp_file=_A )
with pytest.raises(_A ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _A )
def lowerCamelCase_( _lowerCamelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : List[str] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_A ):
ftp_get("ftp://huggingface.co" , temp_file=_A )
with pytest.raises(_A ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _A )
def lowerCamelCase_( _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : str = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_A ):
fsspec_get("s3://huggingface.co" , temp_file=_A )
with pytest.raises(_A ):
fsspec_head("s3://huggingface.co" ) | 355 |
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital( n ) -> bool:
    '''simple docstring'''
    base_str = str(n )
    return len(base_str ) == 9 and set(base_str ) == set("123456789" )
def solution( ) -> int | None:
    '''simple docstring'''
    for base_num in range(9999 , 4999 , -1 ):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(333 , 99 , -1 ):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
return None
if __name__ == "__main__":
print(f'''{solution() = }''') | 340 | 0 |
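# Sanity check (Project Euler problem 38): 100002 * n concatenates a 4-digit n
# with 2 * n, so n = 9327 yields 932718654, the expected answer.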
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class A_ ( nn.Module ):
lowerCAmelCase__ = 4_2
lowerCAmelCase__ = jnp.floataa
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = nn.Conv(
self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
def __call__( self: Optional[int] ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Any = hidden_states.shape
_lowerCamelCase : str = jax.image.resize(
__lowerCAmelCase ,shape=(batch, height * 2, width * 2, channels) ,method="nearest" ,)
_lowerCamelCase : Tuple = self.conv(__lowerCAmelCase )
return hidden_states
class A_ ( nn.Module ):
lowerCAmelCase__ = 4_2
lowerCAmelCase__ = jnp.floataa
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Dict = nn.Conv(
self.out_channels ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
def __call__( self: int ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.conv(__lowerCAmelCase )
return hidden_states
class A_ ( nn.Module ):
lowerCAmelCase__ = 4_2
lowerCAmelCase__ = None
lowerCAmelCase__ = 0.0
lowerCAmelCase__ = None
lowerCAmelCase__ = jnp.floataa
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.in_channels if self.out_channels is None else self.out_channels
_lowerCamelCase : Union[str, Any] = nn.GroupNorm(num_groups=32 ,epsilon=1e-5 )
_lowerCamelCase : Optional[int] = nn.Conv(
__lowerCAmelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
_lowerCamelCase : Optional[int] = nn.Dense(__lowerCAmelCase ,dtype=self.dtype )
_lowerCamelCase : int = nn.GroupNorm(num_groups=32 ,epsilon=1e-5 )
_lowerCamelCase : Optional[Any] = nn.Dropout(self.dropout_prob )
_lowerCamelCase : Tuple = nn.Conv(
__lowerCAmelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
_lowerCamelCase : Optional[Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
_lowerCamelCase : Dict = None
if use_nin_shortcut:
_lowerCamelCase : Any = nn.Conv(
__lowerCAmelCase ,kernel_size=(1, 1) ,strides=(1, 1) ,padding="VALID" ,dtype=self.dtype ,)
def __call__( self: Union[str, Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: int=True ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = hidden_states
_lowerCamelCase : List[str] = self.norma(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = nn.swish(__lowerCAmelCase )
_lowerCamelCase : str = self.conva(__lowerCAmelCase )
_lowerCamelCase : Any = self.time_emb_proj(nn.swish(__lowerCAmelCase ) )
_lowerCamelCase : List[str] = jnp.expand_dims(jnp.expand_dims(__lowerCAmelCase ,1 ) ,1 )
_lowerCamelCase : Union[str, Any] = hidden_states + temb
_lowerCamelCase : Dict = self.norma(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = nn.swish(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = self.dropout(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.conva(__lowerCAmelCase )
if self.conv_shortcut is not None:
_lowerCamelCase : List[str] = self.conv_shortcut(__lowerCAmelCase )
return hidden_states + residual | 356 |
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class A_ ( _a ):
lowerCAmelCase__ = 'char'
lowerCAmelCase__ = 'bpe'
lowerCAmelCase__ = 'wp'
_lowerCAmelCase : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class A_ ( _a ):
lowerCAmelCase__ = ['image_processor', 'char_tokenizer']
lowerCAmelCase__ = 'ViTImageProcessor'
lowerCAmelCase__ = 'MgpstrTokenizer'
def __init__( self: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Optional[int]=None ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." ,__lowerCAmelCase ,)
_lowerCamelCase : List[Any] = kwargs.pop("feature_extractor" )
_lowerCamelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
_lowerCamelCase : List[str] = tokenizer
_lowerCamelCase : str = AutoTokenizer.from_pretrained("gpt2" )
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__lowerCAmelCase ,__lowerCAmelCase )
def __call__( self: Optional[int] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[Any]=None ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
_lowerCamelCase : Optional[int] = self.image_processor(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
if text is not None:
_lowerCamelCase : int = self.char_tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_lowerCamelCase : Tuple = encodings["input_ids"]
return inputs
def _lowercase ( self: int ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = sequences
_lowerCamelCase : Dict = char_preds.size(0 )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self._decode_helper(__lowerCAmelCase ,"char" )
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self._decode_helper(__lowerCAmelCase ,"bpe" )
_lowerCamelCase, _lowerCamelCase : Tuple = self._decode_helper(__lowerCAmelCase ,"wp" )
_lowerCamelCase : List[str] = []
_lowerCamelCase : str = []
for i in range(__lowerCAmelCase ):
_lowerCamelCase : str = [char_scores[i], bpe_scores[i], wp_scores[i]]
_lowerCamelCase : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]]
_lowerCamelCase : Optional[Any] = scores.index(max(__lowerCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = final_strs
_lowerCamelCase : int = final_scores
_lowerCamelCase : str = char_strs
_lowerCamelCase : Dict = bpe_strs
_lowerCamelCase : int = wp_strs
return out
def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
_lowerCamelCase : int = self.char_decode
_lowerCamelCase : List[str] = 1
_lowerCamelCase : Optional[int] = "[s]"
elif format == DecodeType.BPE:
_lowerCamelCase : Dict = self.bpe_decode
_lowerCamelCase : str = 2
_lowerCamelCase : Union[str, Any] = "#"
elif format == DecodeType.WORDPIECE:
_lowerCamelCase : int = self.wp_decode
_lowerCamelCase : List[str] = 102
_lowerCamelCase : List[Any] = "[SEP]"
else:
raise ValueError(F"""Format {format} is not supported.""" )
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = [], []
_lowerCamelCase : Any = pred_logits.size(0 )
_lowerCamelCase : int = pred_logits.size(1 )
_lowerCamelCase, _lowerCamelCase : List[Any] = pred_logits.topk(1 ,dim=-1 ,largest=__lowerCAmelCase ,sorted=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = preds_index.view(-1 ,__lowerCAmelCase )[:, 1:]
_lowerCamelCase : List[str] = decoder(__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : str = torch.nn.functional.softmax(__lowerCAmelCase ,dim=2 ).max(dim=2 )
_lowerCamelCase : Any = preds_max_prob[:, 1:]
for index in range(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = preds_str[index].find(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = preds_str[index][:pred_eos]
_lowerCamelCase : Optional[Any] = preds_index[index].cpu().tolist()
_lowerCamelCase : List[str] = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1
_lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1]
_lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__lowerCAmelCase )
conf_scores.append(__lowerCAmelCase )
return dec_strs, conf_scores
def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : str = [seq.replace(" " ,"" ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )]
return decode_strs
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__lowerCAmelCase )
def _lowercase ( self: Tuple ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = [seq.replace(" " ,"" ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )]
return decode_strs | 340 | 0 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = len(_lowerCamelCase )
_lowerCamelCase : List[Any] = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
_lowerCamelCase : Optional[Any] = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
_lowerCamelCase : Optional[Any] = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
_lowerCamelCase : Optional[int] = subset[i - 1][j]
if arr[i - 1] <= j:
_lowerCamelCase : Optional[Any] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod() | 357 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_lowerCAmelCase : List[Any] = get_logger(__name__)
class A_ :
lowerCAmelCase__ = 'dummy_data'
lowerCAmelCase__ = 'datasets'
lowerCAmelCase__ = False
def __init__( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[Version, str] ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[List[Callable]] = None ,):
'''simple docstring'''
_lowerCamelCase : str = 0
_lowerCamelCase : List[str] = dataset_name
_lowerCamelCase : Optional[int] = cache_dir
_lowerCamelCase : Optional[int] = use_local_dummy_data
_lowerCamelCase : int = config
# download_callbacks take a single url as input
_lowerCamelCase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_lowerCamelCase : int = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_lowerCamelCase : Tuple = str(__lowerCAmelCase )
# to be downloaded
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Dict = None
@property
def _lowercase ( self: str ):
'''simple docstring'''
if self._dummy_file is None:
_lowerCamelCase : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def _lowercase ( self: str ):
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" ,self.config.name ,self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" ,self.version_name )
@property
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
return os.path.join(self.dummy_data_folder ,"dummy_data.zip" )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_lowerCamelCase : Optional[int] = cached_path(
__lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=__lowerCAmelCase ,force_extract=__lowerCAmelCase )
return os.path.join(__lowerCAmelCase ,self.dummy_file_name )
@property
def _lowercase ( self: Tuple ):
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file )
@property
def _lowercase ( self: List[str] ):
'''simple docstring'''
if self._bucket_url is None:
_lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) )
return self._bucket_url
@property
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ):
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_lowerCamelCase : Tuple = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_lowerCamelCase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,(list, tuple) ):
return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase )
else:
return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return self.download_and_extract(__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ):
'''simple docstring'''
return self.download_and_extract(__lowerCAmelCase )
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return path
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return {}
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : str = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
for single_url in single_urls:
download_callback(__lowerCAmelCase )
else:
_lowerCamelCase : Union[str, Any] = single_urls
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) for x in single_urls]
else:
_lowerCamelCase : Union[str, Any] = single_urls
_lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) )
_lowerCamelCase : List[Any] = value
# make sure that values are unique
if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_lowerCamelCase : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,__lowerCAmelCase ) ) for url in data_url )
_lowerCamelCase : Optional[Any] = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_lowerCamelCase : Tuple = [data_url[0]] * len(__lowerCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(__lowerCAmelCase )
return dummy_data_list
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(__lowerCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
pass
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
def _iter_archive_members(__lowerCAmelCase: Any ):
# this preserves the order of the members inside the ZIP archive
_lowerCamelCase : Tuple = Path(self.dummy_file ).parent
_lowerCamelCase : str = path.relative_to(__lowerCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_lowerCamelCase : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = Path(__lowerCAmelCase )
_lowerCamelCase : int = _iter_archive_members(__lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(__lowerCAmelCase ).as_posix(), file_path.open("rb" )
def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = [paths]
for path in paths:
if os.path.isfile(__lowerCAmelCase ):
if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__lowerCAmelCase ):
if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(__lowerCAmelCase ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(__lowerCAmelCase ,__lowerCAmelCase ) | 340 | 0 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Dict = len(A__ )
for i in range(n - 1 ):
for j in range(i + 1 , A__ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def lowerCamelCase_( _lowerCamelCase ) -> Any:
'''simple docstring'''
if len(A__ ) <= 1:
return arr, 0
_lowerCamelCase : Tuple = len(A__ ) // 2
_lowerCamelCase : int = arr[0:mid]
_lowerCamelCase : str = arr[mid:]
_lowerCamelCase, _lowerCamelCase : Any = count_inversions_recursive(A__ )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = count_inversions_recursive(A__ )
_lowerCamelCase, _lowerCamelCase : Any = _count_cross_inversions(A__ , A__ )
_lowerCamelCase : Union[str, Any] = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : int = []
_lowerCamelCase : Optional[int] = 0
while i < len(A__ ) and j < len(A__ ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(A__ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(A__ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def lowerCamelCase_( ) -> Any:
'''simple docstring'''
_lowerCamelCase : int = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_lowerCamelCase : Dict = count_inversions_bf(A__ )
_lowerCamelCase, _lowerCamelCase : Any = count_inversions_recursive(A__ )
assert num_inversions_bf == num_inversions_recursive == 8
print("number of inversions = " , A__ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
_lowerCamelCase : Tuple = count_inversions_bf(A__ )
_lowerCamelCase, _lowerCamelCase : int = count_inversions_recursive(A__ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , A__ )
# an empty list should also have zero inversions
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : int = count_inversions_bf(A__ )
_lowerCamelCase, _lowerCamelCase : Dict = count_inversions_recursive(A__ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , A__ )
if __name__ == "__main__":
main() | 358 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("Undefined for non-integers" )
elif precision < 1:
raise ValueError("Undefined for non-natural numbers" )
_lowerCamelCase : int = precision
_lowerCamelCase : Dict = ceil(precision / 14 )
_lowerCamelCase : Optional[Any] = 426880 * Decimal(10005 ).sqrt()
_lowerCamelCase : int = 1
_lowerCamelCase : Optional[int] = 13591409
_lowerCamelCase : int = Decimal(_lowerCamelCase )
for k in range(1 , _lowerCamelCase ):
_lowerCamelCase : Union[str, Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowerCamelCase ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = 50
print(f'''The first {n} digits of pi is: {pi(n)}''') | 340 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_lowerCAmelCase : Any = logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE__ ):
def __init__( self: Optional[Any] ,*__lowerCAmelCase: Tuple ,**__lowerCAmelCase: str ):
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." ,__lowerCAmelCase ,)
super().__init__(*__lowerCAmelCase ,**__lowerCAmelCase ) | 359 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class A_ ( _a ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = None
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=0.9_9_9 , _lowerCamelCase="cosine" , ) -> List[str]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_lowerCamelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_lowerCamelCase ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_lowerCamelCase : str = []
for i in range(_lowerCamelCase ):
_lowerCamelCase : Any = i / num_diffusion_timesteps
_lowerCamelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) )
return torch.tensor(_lowerCamelCase , dtype=torch.floataa )
class A_ ( _a , _a ):
@register_to_config
def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,):
'''simple docstring'''
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
_lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = 1.0 - self.betas
_lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 )
_lowerCamelCase : int = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
_lowerCamelCase : Tuple = 1.0
# setable values
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() )
_lowerCamelCase : List[str] = variance_type
def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ):
'''simple docstring'''
return sample
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ):
'''simple docstring'''
_lowerCamelCase : str = num_inference_steps
_lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
_lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
_lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ):
'''simple docstring'''
if prev_timestep is None:
_lowerCamelCase : List[str] = t - 1
_lowerCamelCase : Optional[int] = self.alphas_cumprod[t]
_lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : Dict = 1 - alpha_prod_t
_lowerCamelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : List[Any] = self.betas[t]
else:
_lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
_lowerCamelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
_lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) )
_lowerCamelCase : str = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
_lowerCamelCase : str = variance.log()
_lowerCamelCase : str = beta.log()
_lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2
_lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : str = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
_lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 )
else:
_lowerCamelCase : List[Any] = None
# 1. compute alphas, betas
if prev_timestep is None:
_lowerCamelCase : List[Any] = t - 1
_lowerCamelCase : Dict = self.alphas_cumprod[t]
_lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : Dict = 1 - alpha_prod_t
_lowerCamelCase : List[str] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : Any = self.betas[t]
_lowerCamelCase : str = self.alphas[t]
else:
_lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev
_lowerCamelCase : Optional[Any] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_lowerCamelCase : List[Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_lowerCamelCase : Any = torch.clamp(
__lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
_lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_lowerCamelCase : Union[str, Any] = 0
if t > 0:
_lowerCamelCase : Dict = randn_tensor(
model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device )
_lowerCamelCase : Any = self._get_variance(
__lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,)
if self.variance_type == "fixed_small_log":
_lowerCamelCase : Optional[Any] = variance
elif self.variance_type == "learned_range":
_lowerCamelCase : Optional[int] = (0.5 * variance).exp()
else:
raise ValueError(
F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
" for the UnCLIPScheduler." )
_lowerCamelCase : Dict = variance * variance_noise
_lowerCamelCase : List[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase )
def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.IntTensor ,):
'''simple docstring'''
_lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype )
_lowerCamelCase : Any = timesteps.to(original_samples.device )
_lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5
_lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
_lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 )
_lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
_lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
_lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
_lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples | 340 | 0 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class A_ ( logging.LoggerAdapter ):
@staticmethod
def _lowercase ( __lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _lowercase ( self: List[str] ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
_lowerCamelCase : int = kwargs.pop("main_process_only" ,__UpperCamelCase )
_lowerCamelCase : List[Any] = kwargs.pop("in_order" ,__UpperCamelCase )
if self.isEnabledFor(__UpperCamelCase ):
if self._should_log(__UpperCamelCase ):
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.process(__UpperCamelCase ,__UpperCamelCase )
self.logger.log(__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,**__UpperCamelCase )
elif in_order:
_lowerCamelCase : Dict = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_lowerCamelCase, _lowerCamelCase : Any = self.process(__UpperCamelCase ,__UpperCamelCase )
self.logger.log(__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,**__UpperCamelCase )
state.wait_for_everyone()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = None ) -> int:
'''simple docstring'''
if log_level is None:
_lowerCamelCase : Optional[int] = os.environ.get("ACCELERATE_LOG_LEVEL" , a__ )
_lowerCamelCase : Any = logging.getLogger(a__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(a__ , {} ) | 360 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCAmelCase : str = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
for attribute in key.split("." ):
_lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
_lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
_lowerCamelCase : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
_lowerCamelCase : Tuple = value
elif weight_type == "weight_g":
_lowerCamelCase : List[str] = value
elif weight_type == "weight_v":
_lowerCamelCase : List[Any] = value
elif weight_type == "bias":
_lowerCamelCase : str = value
elif weight_type == "running_mean":
_lowerCamelCase : Optional[int] = value
elif weight_type == "running_var":
_lowerCamelCase : Optional[Any] = value
elif weight_type == "num_batches_tracked":
_lowerCamelCase : int = value
elif weight_type == "inv_freq":
_lowerCamelCase : List[str] = value
else:
_lowerCamelCase : Optional[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Dict = []
_lowerCamelCase : Optional[Any] = fairseq_model.state_dict()
_lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase : Dict = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
_lowerCamelCase : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_lowerCamelCase : int = True
if "*" in mapped_key:
_lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2]
_lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase )
if "pos_bias_u" in name:
_lowerCamelCase : int = None
elif "pos_bias_v" in name:
_lowerCamelCase : Any = None
elif "weight_g" in name:
_lowerCamelCase : Any = "weight_g"
elif "weight_v" in name:
_lowerCamelCase : Any = "weight_v"
elif "bias" in name:
_lowerCamelCase : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase : Dict = "weight"
elif "running_mean" in name:
_lowerCamelCase : str = "running_mean"
elif "inv_freq" in name:
_lowerCamelCase : List[Any] = "inv_freq"
elif "running_var" in name:
_lowerCamelCase : Tuple = "running_var"
elif "num_batches_tracked" in name:
_lowerCamelCase : str = "num_batches_tracked"
else:
_lowerCamelCase : Dict = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : int = full_name.split("conv_layers." )[-1]
_lowerCamelCase : List[Any] = name.split("." )
_lowerCamelCase : Union[str, Any] = int(items[0] )
_lowerCamelCase : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
_lowerCamelCase : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
_lowerCamelCase : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
_lowerCamelCase : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
_lowerCamelCase : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" )
else:
_lowerCamelCase : Dict = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
_lowerCamelCase : List[Any] = "rotary"
if is_finetuned:
if dict_path:
_lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase : Optional[int] = target_dict.pad_index
_lowerCamelCase : Dict = target_dict.bos_index
_lowerCamelCase : Optional[Any] = target_dict.eos_index
_lowerCamelCase : str = len(target_dict.symbols )
_lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
_lowerCamelCase : Tuple = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase : List[str] = 0
_lowerCamelCase : List[Any] = 1
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , )
_lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False
_lowerCamelCase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
_lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
_lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase )
else:
_lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase )
if is_finetuned:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" )
_lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
_lowerCamelCase : Dict = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_lowerCAmelCase : str = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 340 | 0 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCAmelCase : Optional[Any] = '''\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
'''
_lowerCAmelCase : Tuple = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a (human-produced) reference summary or translation, or against a set of references.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_lowerCAmelCase : Tuple = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
    references: list of references for each prediction. Each
        reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
    def _info(self):
        """Declare the metric's metadata, citation and expected input features."""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Value("string" ,id="sequence" ),
"references": datasets.Value("string" ,id="sequence" ),
} ) ,codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] ,reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
] ,)
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        """Score each (prediction, reference) pair and optionally bootstrap-aggregate."""
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
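# Added usage sketch (not part of the original metric script): assuming this file
# is loaded through `datasets.load_metric("rouge")`, a minimal end-to-end call
# looks like this; it mirrors the example in the docstring above.
if __name__ == "__main__":
    rouge = datasets.load_metric("rouge")
    results = rouge.compute(
        predictions=["hello there", "general kenobi"],
        references=["hello there", "general kenobi"],
    )
    print(results["rouge1"].mid.fmeasure)  # identical strings -> 1.0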
| 361 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float:
'''simple docstring'''
_validate_point(_lowerCamelCase )
_validate_point(_lowerCamelCase )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(_lowerCamelCase , _lowerCamelCase ) ) )
def lowerCamelCase_( _lowerCamelCase ) -> None:
'''simple docstring'''
if point:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
for item in point:
if not isinstance(_lowerCamelCase , (int, float) ):
_lowerCamelCase : Dict = (
"Expected a list of numbers as input, found "
F"""{type(_lowerCamelCase ).__name__}"""
)
raise TypeError(_lowerCamelCase )
else:
_lowerCamelCase : Optional[int] = F"""Expected a list of numbers as input, found {type(_lowerCamelCase ).__name__}"""
raise TypeError(_lowerCamelCase )
else:
raise ValueError("Missing an input" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float:
'''simple docstring'''
_validate_point(_lowerCamelCase )
_validate_point(_lowerCamelCase )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(_lowerCamelCase , _lowerCamelCase ) ) )
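# Added sanity check (a sketch, not in the original file): |1 - 4| + |1 - 5| == 7,
# and both variants must agree.
if __name__ == "__main__":
    assert manhattan_distance([1, 1], [4, 5]) == 7.0
    assert manhattan_distance_one_liner([1, 1], [4, 5]) == 7.0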
if __name__ == "__main__":
import doctest
doctest.testmod() | 340 | 0 |
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    """Return ``i + 1``; defined at module level so parallel workers can pickle it."""
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input() -> None:
    """`parallel_backend` accepts "spark" and rejects unknown backend names."""
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def test_parallel_backend_map_nested_with_num_proc(num_proc):
    """`map_nested` over the spark backend should match the sequential results."""
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
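# Added sketch (not part of the original tests): the user-facing pattern these
# tests exercise. It requires the joblibspark package, so it is shown as a
# comment rather than executed at import time.
#
#     with parallel_backend("spark"):
#         outputs = map_nested(add_one, {"a": [1, 2], "b": [3, 4]}, num_proc=2)
#     # outputs == {"a": [2, 3], "b": [4, 5]}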
| 362 |
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Return a parquet row-group size capped for image/audio/binary features, or None."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs,
        )

    def read(self):
        """Build and return the dataset (streaming or map-style)."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
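# Added usage sketch (not in the original module; the file paths are
# hypothetical). ParquetDatasetWriter is defined just below:
#
#     ds = ParquetDatasetReader("data/train.parquet", split="train").read()
#     ParquetDatasetWriter(ds, "out/train.parquet", batch_size=1000).write()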
class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        # Stream the Arrow table to Parquet in fixed-size batches and return the
        # number of bytes written.
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
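# A minimal usage sketch (assumes a small in-memory Dataset; a writable binary
# file object can be passed in place of the path):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"]})
#   n_bytes = ParquetDatasetWriter(ds, "out.parquet").write()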
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    """Boyer-Moore substring search using only the bad-character rule."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatching character for the
        window starting at `current_pos`, or -1 if the whole pattern matches."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # Slide the pattern over the text; on a mismatch, compute the shift
        # suggested by the bad-character rule.
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = 'ABAABA'
pattern = 'AB'

bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print('''No match found''')
else:
    print('''Pattern found in following positions: ''')
    print(positions)
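# With the bad-character rule alone (no good-suffix rule) the worst case is
# O(len(text) * len(pattern)); for text "ABAABA" and pattern "AB" this prints [0, 3].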
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
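# The _LazyModule stand-in defers importing the torch-heavy modeling files until
# one of the listed attributes is first accessed, keeping the package import cheap.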
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = 'vision-encoder-decoder'
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F"""A configuration of type {self.model_type} cannot be instantiated because """
                F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> "PretrainedConfig":
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}} )
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
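# A minimal composition sketch (assuming any standard vision/text sub-configs):
#
#   from transformers import ViTConfig, BertConfig
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention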
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
_lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with additional parameters for weight pruning/masking."""

    model_type = 'masked_bert'

    def __init__(
        self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
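# Sketch of the pruning knobs (semantics as in the movement-pruning example this
# config belongs to): "topK" keeps the top-scoring fraction of each weight matrix,
# while `mask_init`/`mask_scale` control how the learned importance scores start.
#
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)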
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
_lowerCAmelCase : int = """sshleifer/student_marian_en_ro_6_1"""
_lowerCAmelCase : List[str] = """sshleifer/tiny-mbart"""
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self, distributed=False, extra_args_str=None, predict_with_generate=True,
        do_train=True, do_eval=True, do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1,
            distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate,
            do_train=do_train, do_eval=do_eval, do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)
    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4,
            num_train_epochs=10, distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,  # force run in a new process
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,  # keep memory usage deterministic
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")

        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self, max_len: int, model_name: str, num_train_epochs: int, learning_rate: float = 3e-3,
        optim: str = "adafactor", distributed: bool = False, extra_args_str: str = None, eval_steps: int = 0,
        predict_with_generate: bool = True, do_train: bool = True, do_eval: bool = True,
        do_predict: bool = True, n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()
        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()
        args_predict = "--do_predict".split()

        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += ["--predict_with_generate"]
        if do_train:
            args += ["--adafactor"] if optim == "adafactor" else f"--optim {optim}".split()
        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ['''XLA_PYTHON_CLIENT_MEM_FRACTION'''] = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 array of the given shape with token ids in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
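# For example, ids_tensor((2, 5), vocab_size=99) yields a (2, 5) int32 array of
# token ids in [0, 98]; random_attention_mask((2, 5)) additionally forces the
# last position of every row to 1 so no batch row is fully masked.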
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
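# Weight tying: the LM head is rebuilt as a bias-free nn.Linear whose weight data
# is the shared token-embedding matrix, so the output projection stays in sync
# with the embeddings after conversion.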
def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size, max_position_embeddings=1024,
        encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout,
        attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_lowerCAmelCase : int = parser.parse_args()
_lowerCAmelCase : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path) | 367 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = 'mobilenet_v1'

    def __init__(
        self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6",
        tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
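# A minimal instantiation sketch (arguments shown are the defaults above):
#
#   config = MobileNetV1Config(depth_multiplier=1.0, image_size=224)
#   assert config.hidden_act == "relu6"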
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) ,reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] ,)
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    # Exclude every parameter of the module from gradient updates.
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
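# Example wiring (the helper names above replace obfuscated placeholders and are
# editorial, not from a published API):
#
#   device = get_device()
#   freeze_params(model.text_encoder)  # e.g. freeze a sub-module before fine-tuning
#   print(f"[{get_timestamp()}] using device: {device}")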
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.")
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",)
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset.")
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",)
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.",)
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).",)
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",)
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",)

    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    # Return a closure so `dataset.map` can tokenize batches with a fixed tokenizer.
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(F"""Limiting the dataset to {args.limit} entries.""")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, F"""dataset-{shard_count}-{records_containing}.tfrecord""")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(F"""split-{args.split}-records-count.txt""", "w") as f:
        print(F"""Total {args.split} records: {total_records}""", file=f)
if __name__ == "__main__":
    args = parse_args()
    main(args)
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave two strings character by character; leftover characters of the
    longer string are appended at the end.

    >>> alternative_string_arrange("ABCD", "XY")
    'AXBYCD'
    """
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = first_str_length if first_str_length > second_str_length else second_str_length
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
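# For the demo above, "AB" and "XYZ" interleave to "AXBYZ": the pairs (A, X) and
# (B, Y) are emitted first, then the leftover "Z" from the longer string.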
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : Any = [False] * len(_lowerCamelCase )
_lowerCamelCase : Union[str, Any] = [s]
_lowerCamelCase : str = True
while queue:
_lowerCamelCase : Optional[int] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_lowerCamelCase )
_lowerCamelCase : Any = True
_lowerCamelCase : Any = u
return visited[t]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : List[str] = [-1] * (len(_lowerCamelCase ))
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : List[str] = [i[:] for i in graph] # Record original cut, copy.
while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Any = float("Inf" )
_lowerCamelCase : Dict = sink
while s != source:
# Find the minimum capacity along the selected augmenting path
_lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , graph[parent[s]][s] )
_lowerCamelCase : Union[str, Any] = parent[s]
max_flow += path_flow
_lowerCamelCase : Optional[Any] = sink
while v != source:
_lowerCamelCase : Union[str, Any] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_lowerCamelCase : List[str] = parent[v]
for i in range(len(_lowerCamelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5)) | 340 | 0 |
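# A readable sketch of the BFS-based (Edmonds-Karp) max-flow/min-cut routine
# above, run against the same capacity matrix `test_graph`; names are
# illustrative:
def min_cut(capacity, source, sink):
    n = len(capacity)
    graph = [row[:] for row in capacity]  # residual graph; `capacity` keeps the originals

    def bfs_parents():
        # Breadth-first search for an augmenting path; returns parent links.
        parent = [-1] * n
        parent[source] = source
        queue = [source]
        while queue:
            u = queue.pop(0)
            for v in range(n):
                if parent[v] == -1 and graph[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        return parent if parent[sink] != -1 else None

    parent = bfs_parents()
    while parent is not None:
        # Bottleneck capacity along the path, then residual updates.
        flow, v = float("inf"), sink
        while v != source:
            flow = min(flow, graph[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= flow
            graph[v][u] += flow
            v = parent[v]
        parent = bfs_parents()
    # Forward edges saturated by the max flow, as collected above.
    return [(i, j) for i in range(n) for j in range(n) if graph[i][j] == 0 and capacity[i][j] > 0]

assert min_cut(test_graph, source=0, sink=5) == [(1, 3), (4, 3), (4, 5)]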
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
_lowerCAmelCase : Any = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def lowerCamelCase_( _lowerCamelCase = "dhaka" , _lowerCamelCase = 5 ) -> str:
'''simple docstring'''
_lowerCamelCase : Dict = min(_lowerCamelCase , 50 ) # Prevent abuse!
_lowerCamelCase : Optional[Any] = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
_lowerCamelCase : Dict = requests.get("https://www.google.com/search" , params=_lowerCamelCase , headers=_lowerCamelCase )
_lowerCamelCase : str = BeautifulSoup(html.text , "html.parser" )
_lowerCamelCase : Optional[Any] = ''''''.join(
re.findall(R"AF_initDataCallback\(([^<]+)\);" , str(soup.select("script" ) ) ) )
_lowerCamelCase : Dict = json.dumps(_lowerCamelCase )
_lowerCamelCase : Optional[Any] = json.loads(_lowerCamelCase )
_lowerCamelCase : List[str] = re.findall(
R"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," , _lowerCamelCase , )
if not matched_google_image_data:
return 0
_lowerCamelCase : Tuple = re.sub(
R"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" , "" , str(_lowerCamelCase ) , )
_lowerCamelCase : Optional[Any] = re.findall(
R"(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]" , _lowerCamelCase , )
for index, fixed_full_res_image in enumerate(_lowerCamelCase ):
if index >= max_images:
return index
_lowerCamelCase : Any = bytes(_lowerCamelCase , "ascii" ).decode(
"unicode-escape" )
_lowerCamelCase : str = bytes(_lowerCamelCase , "ascii" ).decode(
"unicode-escape" )
_lowerCamelCase : Tuple = urllib.request.build_opener()
_lowerCamelCase : List[Any] = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(_lowerCamelCase )
_lowerCamelCase : Optional[int] = F"""query_{query.replace(' ' , '_' )}"""
if not os.path.exists(_lowerCamelCase ):
os.makedirs(_lowerCamelCase )
urllib.request.urlretrieve( # noqa: S310
_lowerCamelCase , F"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
_lowerCAmelCase : str = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('''Please provide a search term.''')
raise | 371 |
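# The scraper above installs a urllib opener with a browser User-Agent before
# fetching full-resolution files; a compact sketch of the same download
# pattern using `requests` (URL and filename are placeholders):
import requests

def download_image(url: str, path: str) -> None:
    headers = {"User-Agent": "Mozilla/5.0"}  # some hosts reject default clients
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()
    with open(path, "wb") as file:
        file.write(response.content)

# download_image("https://example.com/sample.jpg", "sample.jpg")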
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class A_ ( _a ):
lowerCAmelCase__ = 'camembert'
def __init__( self: Tuple ,__lowerCAmelCase: Union[str, Any]=30_522 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Union[str, Any]=12 ,__lowerCAmelCase: int=12 ,__lowerCAmelCase: Optional[int]=3_072 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: int=512 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Any="absolute" ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: Dict ,):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : List[str] = vocab_size
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : Union[str, Any] = layer_norm_eps
_lowerCamelCase : Tuple = position_embedding_type
_lowerCamelCase : List[Any] = use_cache
_lowerCamelCase : Dict = classifier_dropout
class A_ ( _a ):
@property
def _lowercase ( self: Any ):
'''simple docstring'''
if self.task == "multiple-choice":
_lowerCamelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
_lowerCamelCase : int = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] ) | 340 | 0 |
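# The OrderedDict returned above names the dynamic (variable-size) axes per
# model input; a sketch of how such a mapping is commonly handed to
# `torch.onnx.export` (the model, inputs, and output path are placeholders
# and assume PyTorch is available):
dynamic_axes = {
    "input_ids": {0: "batch", 1: "sequence"},
    "attention_mask": {0: "batch", 1: "sequence"},
}
# torch.onnx.export(model, (input_ids, attention_mask), "camembert.onnx",
#                   input_names=list(dynamic_axes), dynamic_axes=dynamic_axes)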
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class A_ :
def __init__( self: Dict ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Tuple=13 ,__lowerCAmelCase: List[Any]=7 ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: int=True ,__lowerCAmelCase: Union[str, Any]=True ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Optional[int]=99 ,__lowerCAmelCase: Union[str, Any]=32 ,__lowerCAmelCase: Tuple=2 ,__lowerCAmelCase: Any=4 ,__lowerCAmelCase: List[Any]=37 ,__lowerCAmelCase: Optional[Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: Dict=512 ,__lowerCAmelCase: List[Any]=16 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: int=0.02 ,__lowerCAmelCase: Optional[int]=False ,__lowerCAmelCase: Any=True ,__lowerCAmelCase: List[str]="None" ,__lowerCAmelCase: Optional[int]=3 ,__lowerCAmelCase: Optional[int]=4 ,__lowerCAmelCase: Dict=None ,):
'''simple docstring'''
_lowerCamelCase : Dict = parent
_lowerCamelCase : int = batch_size
_lowerCamelCase : Union[str, Any] = seq_length
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : List[str] = use_input_mask
_lowerCamelCase : Dict = use_token_type_ids
_lowerCamelCase : Optional[int] = use_labels
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : Optional[Any] = num_attention_heads
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : int = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : Union[str, Any] = max_position_embeddings
_lowerCamelCase : List[Any] = type_vocab_size
_lowerCamelCase : List[str] = type_sequence_label_size
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : List[Any] = num_labels
_lowerCamelCase : Any = num_choices
_lowerCamelCase : Optional[int] = relative_attention
_lowerCamelCase : Any = position_biased_input
_lowerCamelCase : List[Any] = pos_att_type
_lowerCamelCase : int = scope
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCamelCase : List[Any] = None
if self.use_input_mask:
_lowerCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Tuple = None
if self.use_token_type_ids:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Dict = None
_lowerCamelCase : str = None
if self.use_labels:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowerCamelCase : Dict = DebertaVaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,initializer_range=self.initializer_range ,return_dict=__lowerCAmelCase ,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : str = TFDebertaVaModel(config=__lowerCAmelCase )
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCamelCase : List[str] = [input_ids, input_mask]
_lowerCamelCase : int = model(__lowerCAmelCase )
_lowerCamelCase : str = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = TFDebertaVaForMaskedLM(config=__lowerCAmelCase )
_lowerCamelCase : List[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Any ):
'''simple docstring'''
_lowerCamelCase : Dict = self.num_labels
_lowerCamelCase : str = TFDebertaVaForSequenceClassification(config=__lowerCAmelCase )
_lowerCamelCase : List[str] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_lowerCamelCase : Any = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowercase ( self: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : Any = self.num_labels
_lowerCamelCase : List[str] = TFDebertaVaForTokenClassification(config=__lowerCAmelCase )
_lowerCamelCase : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_lowerCamelCase : Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : str = TFDebertaVaForQuestionAnswering(config=__lowerCAmelCase )
_lowerCamelCase : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_lowerCamelCase : Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase) : List[str] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A_ ( a_ , a_ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Tuple = TFDebertaVaModelTester(self )
_lowerCamelCase : Tuple = ConfigTester(self ,config_class=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : str = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class A_ ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Any = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
_lowerCamelCase : Optional[Any] = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_lowerCamelCase : str = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : int = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase )[0]
_lowerCamelCase : Union[str, Any] = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] ,__lowerCAmelCase ,atol=1e-4 ) | 350 |
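# The tests above build inputs with `ids_tensor` and `random_attention_mask`
# helpers imported from the shared test module; a minimal sketch of what such
# helpers typically produce (assuming TensorFlow is available):
import tensorflow as tf

def make_ids(shape, vocab_size):
    # Uniform random token ids in [0, vocab_size).
    return tf.random.uniform(shape, minval=0, maxval=vocab_size, dtype=tf.int32)

def make_attention_mask(shape):
    # Random 0/1 mask with the first position always attended.
    mask = tf.random.uniform(shape, minval=0, maxval=2, dtype=tf.int32)
    return tf.concat([tf.ones_like(mask[:, :1]), mask[:, 1:]], axis=-1)

token_ids = make_ids((13, 7), vocab_size=99)
attention_mask = make_attention_mask((13, 7))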
"""simple docstring"""
from collections import defaultdict
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Optional[int] = 1
_lowerCamelCase : str = True
for v in tree[start]:
if v not in visited:
ret += dfs(_lowerCamelCase )
if ret % 2 == 0:
cuts.append(_lowerCamelCase )
return ret
def lowerCamelCase_( ) -> int:
'''simple docstring'''
dfs(1 )
if __name__ == "__main__":
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = 10, 9
_lowerCAmelCase : str = defaultdict(list)
_lowerCAmelCase : dict[int, bool] = {}
_lowerCAmelCase : list[int] = []
_lowerCAmelCase : Any = 0
_lowerCAmelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1) | 340 | 0 |
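# A self-contained sketch of the even-tree computation above: count how many
# edges can be removed so that every remaining component has an even number
# of nodes (names are illustrative):
from collections import defaultdict

def count_removable_edges(edge_list, root=1):
    tree = defaultdict(list)
    for u, v in edge_list:
        tree[u].append(v)
        tree[v].append(u)
    removable = 0

    def subtree_size(node, parent):
        nonlocal removable
        size = 1
        for child in tree[node]:
            if child != parent:
                size += subtree_size(child, node)
        if parent is not None and size % 2 == 0:
            removable += 1  # an even-sized subtree can be cut off
        return size

    subtree_size(root, None)
    return removable

sample_edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
assert count_removable_edges(sample_edges) == 2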
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_lowerCAmelCase : Tuple = logging.getLogger(__name__)
_lowerCAmelCase : List[str] = '''pytorch_model.bin'''
@dataclasses.dataclass
class A_ :
lowerCAmelCase__ = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
lowerCAmelCase__ = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class A_ :
lowerCAmelCase__ = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
lowerCAmelCase__ = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
lowerCAmelCase__ = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'A csv or a json file containing the validation data.'} )
lowerCAmelCase__ = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The name of the task to train on.'} , )
lowerCAmelCase__ = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class A_ :
lowerCAmelCase__ = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
lowerCAmelCase__ = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
lowerCAmelCase__ = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch\"]'
} , )
lowerCAmelCase__ = dataclasses.field(
default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
lowerCAmelCase__ = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
lowerCAmelCase__ = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
lowerCAmelCase__ = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
lowerCAmelCase__ = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
lowerCAmelCase__ = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
lowerCAmelCase__ = dataclasses.field(
default=1_0_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
lowerCAmelCase__ = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Random seed for initialization.'} , )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : List[str] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
_lowerCamelCase : Optional[Any] = dataset.filter(lambda _lowerCamelCase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
_lowerCamelCase : Tuple = int(eval_result * len(_SCREAMING_SNAKE_CASE ) )
print(_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[Any] = dataset.sort("probability" , reverse=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Dict = dataset.select(range(_SCREAMING_SNAKE_CASE ) )
_lowerCamelCase : Optional[int] = dataset.remove_columns(["label", "probability"] )
_lowerCamelCase : str = dataset.rename_column("prediction" , "label" )
_lowerCamelCase : Tuple = dataset.map(lambda _lowerCamelCase : {"label": idalabel[example["label"]]} )
_lowerCamelCase : List[Any] = dataset.shuffle(seed=args.seed )
_lowerCamelCase : int = os.path.join(_SCREAMING_SNAKE_CASE , F"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(_SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
else:
dataset.to_json(_SCREAMING_SNAKE_CASE )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : Dict = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_lowerCamelCase : Optional[Any] = STModelArguments(model_name_or_path=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[int] = STDataArguments(train_file=_SCREAMING_SNAKE_CASE , infer_file=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Union[str, Any] = STTrainingArguments(output_dir=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : str = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_SCREAMING_SNAKE_CASE ).items():
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for key, value in kwargs.items():
if hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Sanity checks
_lowerCamelCase : str = {}
_lowerCamelCase : List[str] = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_lowerCamelCase : Tuple = args.train_file
_lowerCamelCase : List[str] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_lowerCamelCase : List[str] = args.eval_file
for key in data_files:
_lowerCamelCase : List[str] = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
_lowerCamelCase : List[Any] = extension
else:
assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
_lowerCamelCase : Optional[int] = F"""{args.output_dir}/self-train_iter-{{}}""".format
_lowerCamelCase : Any = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_SCREAMING_SNAKE_CASE )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
_lowerCamelCase : int = None
_lowerCamelCase : Any = None
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Optional[Any] = False
# Show the progress bar
_lowerCamelCase : Optional[int] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_lowerCamelCase : List[Any] = data_dir_format(_SCREAMING_SNAKE_CASE )
assert os.path.exists(_SCREAMING_SNAKE_CASE )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_lowerCamelCase : str = os.path.join(_SCREAMING_SNAKE_CASE , "stage-1" )
_lowerCamelCase : Optional[Any] = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
arguments_dict.update({key: value} )
_lowerCamelCase : str = os.path.join(_SCREAMING_SNAKE_CASE , "best-checkpoint" , _SCREAMING_SNAKE_CASE )
if os.path.exists(_SCREAMING_SNAKE_CASE ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , _SCREAMING_SNAKE_CASE )
finetune(**_SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
assert os.path.exists(_SCREAMING_SNAKE_CASE )
logger.info("Self-training job completed: iteration: %d, stage: 1." , _SCREAMING_SNAKE_CASE )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_lowerCamelCase : Any = os.path.join(_SCREAMING_SNAKE_CASE , "best-checkpoint" )
_lowerCamelCase : List[str] = os.path.join(_SCREAMING_SNAKE_CASE , "stage-2" )
# Update arguments_dict
_lowerCamelCase : Optional[int] = model_path
_lowerCamelCase : List[str] = data_files["train"]
_lowerCamelCase : str = current_output_dir
_lowerCamelCase : Tuple = os.path.join(_SCREAMING_SNAKE_CASE , "best-checkpoint" , _SCREAMING_SNAKE_CASE )
if os.path.exists(_SCREAMING_SNAKE_CASE ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , _SCREAMING_SNAKE_CASE )
finetune(**_SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
assert os.path.exists(_SCREAMING_SNAKE_CASE )
logger.info("Self-training job completed: iteration: %d, stage: 2." , _SCREAMING_SNAKE_CASE )
_lowerCamelCase : int = iteration
_lowerCamelCase : int = data_dir_format(iteration + 1 )
_lowerCamelCase : Dict = AutoConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , "best-checkpoint" ) )
_lowerCamelCase : str = config.idalabel
_lowerCamelCase : Dict = os.path.join(_SCREAMING_SNAKE_CASE , "eval_results_best-checkpoint.json" )
_lowerCamelCase : Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , "test_results_best-checkpoint.json" )
assert os.path.exists(_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , "r" ) as f:
_lowerCamelCase : str = float(json.load(_SCREAMING_SNAKE_CASE )[args.eval_metric] )
_lowerCamelCase : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , "infer_output_best-checkpoint.csv" )
assert os.path.exists(_SCREAMING_SNAKE_CASE )
# Loading the dataset from local csv or json files.
_lowerCamelCase : int = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
_lowerCamelCase : Tuple = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
shutil.copy(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , F"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(_SCREAMING_SNAKE_CASE ):
shutil.copy(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , F"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
_lowerCamelCase : Dict = os.path.join(_SCREAMING_SNAKE_CASE , F"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_lowerCamelCase : int = eval_result
if best_iteration is None:
_lowerCamelCase : Any = new_iteration
_lowerCamelCase : Union[str, Any] = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_lowerCamelCase : Optional[int] = new_iteration
_lowerCamelCase : str = new_eval_result
_lowerCamelCase : str = 0
else:
if new_eval_result == best_eval_result:
_lowerCamelCase : Union[str, Any] = new_iteration
_lowerCamelCase : List[Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_lowerCamelCase : Any = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , _SCREAMING_SNAKE_CASE )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , _SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_SCREAMING_SNAKE_CASE , F"""eval_results_iter-{iteration}.json""" ) , os.path.join(_SCREAMING_SNAKE_CASE , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , _SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_SCREAMING_SNAKE_CASE , F"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(_SCREAMING_SNAKE_CASE , "eval_results_best-iteration.json" ) , ) | 351 |
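# The loop above keeps only confident pseudo-labels before the next
# self-training round; a compact sketch of that filtering step with the
# `datasets` library (column names mirror the script, the data is made up):
from datasets import Dataset

predictions = Dataset.from_dict({
    "text": ["a", "b", "c"],
    "prediction": [1, 0, 1],
    "probability": [0.95, 0.40, 0.80],
})
confident = predictions.filter(lambda example: example["probability"] > 0.7)
pseudo_train = confident.rename_column("prediction", "label").remove_columns("probability")
assert pseudo_train["label"] == [1, 1]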
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_lowerCAmelCase : Optional[int] = '''__DUMMY_TRANSFORMERS_USER__'''
_lowerCAmelCase : Dict = '''Dummy User'''
_lowerCAmelCase : Optional[int] = '''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'''
_lowerCAmelCase : Tuple = '''https://hub-ci.huggingface.co'''
_lowerCAmelCase : Any = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
_lowerCAmelCase : Tuple = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
_lowerCAmelCase : Dict = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , _lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Tuple:
'''simple docstring'''
monkeypatch.setattr("datasets.config.HF_ENDPOINT" , _lowerCamelCase )
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , _lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , _lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
HfFolder.save_token(_lowerCamelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="session" )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
return HfApi(endpoint=_lowerCamelCase )
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Dict = HfFolder.get_token()
HfFolder.save_token(_lowerCamelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
def _cleanup_repo(_lowerCamelCase ):
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
return _cleanup_repo
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> List[str]:
'''simple docstring'''
@contextmanager
def _temporary_repo(_lowerCamelCase ):
try:
yield repo_id
finally:
cleanup_repo(_lowerCamelCase )
return _temporary_repo
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : Tuple = F"""repo_txt_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : List[str] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : Dict = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Any = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : List[Any] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_ | 340 | 0 |
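# A sketch of how the `temporary_repo` context fixture above is typically
# consumed in a test; the repo name and test body are illustrative:
def test_with_scratch_repo(temporary_repo, hf_api, hf_token):
    repo_id = f"{CI_HUB_USER}/scratch-{int(time.time() * 10e3)}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
    with temporary_repo(repo_id):
        ...  # exercise code against repo_id; cleanup deletes it on exit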
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
_lowerCAmelCase : Union[str, Any] = random.Random()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=1.0 , _lowerCamelCase=None , _lowerCamelCase=None ) -> List[str]:
'''simple docstring'''
if rng is None:
_lowerCamelCase : Union[str, Any] = global_rng
_lowerCamelCase : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class A_ ( unittest.TestCase ):
def __init__( self: Optional[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[int]=7 ,__lowerCAmelCase: List[Any]=400 ,__lowerCAmelCase: Union[str, Any]=2_000 ,__lowerCAmelCase: Optional[int]=1 ,__lowerCAmelCase: Dict=0.0 ,__lowerCAmelCase: Union[str, Any]=16_000 ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Any=80 ,__lowerCAmelCase: List[str]=16 ,__lowerCAmelCase: Any=64 ,__lowerCAmelCase: str="hann_window" ,__lowerCAmelCase: Dict=80 ,__lowerCAmelCase: List[Any]=7_600 ,__lowerCAmelCase: Dict=1e-10 ,__lowerCAmelCase: List[Any]=True ,):
'''simple docstring'''
_lowerCamelCase : Dict = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : str = min_seq_length
_lowerCamelCase : int = max_seq_length
_lowerCamelCase : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase : int = feature_size
_lowerCamelCase : List[Any] = padding_value
_lowerCamelCase : Optional[Any] = sampling_rate
_lowerCamelCase : int = do_normalize
_lowerCamelCase : Optional[Any] = num_mel_bins
_lowerCamelCase : Optional[int] = hop_length
_lowerCamelCase : str = win_length
_lowerCamelCase : Union[str, Any] = win_function
_lowerCamelCase : Dict = fmin
_lowerCamelCase : str = fmax
_lowerCamelCase : Optional[int] = mel_floor
_lowerCamelCase : str = return_attention_mask
def _lowercase ( self: List[str] ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: List[str]=False ,__lowerCAmelCase: Dict=False ):
'''simple docstring'''
def _flatten(__lowerCAmelCase: Optional[int] ):
return list(itertools.chain(*__lowerCAmelCase ) )
if equal_length:
_lowerCamelCase : Tuple = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_lowerCamelCase : List[str] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
_lowerCamelCase : Optional[int] = [np.asarray(__lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
def _lowercase ( self: int ,__lowerCAmelCase: int=False ,__lowerCAmelCase: Union[str, Any]=False ):
'''simple docstring'''
if equal_length:
_lowerCamelCase : List[Any] = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCamelCase : Union[str, Any] = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
_lowerCamelCase : Optional[int] = [np.asarray(__lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = SpeechTaFeatureExtractor
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = SpeechTaFeatureExtractionTester(self )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
self.assertTrue(np.all(np.mean(__lowerCAmelCase ,axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__lowerCAmelCase ,axis=0 ) - 1 ) < 1e-3 ) )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
_lowerCamelCase : Dict = [np.asarray(__lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
_lowerCamelCase : List[Any] = feat_extract(speech_inputs[0] ,return_tensors="np" ).input_values
_lowerCamelCase : Any = feat_extract(np_speech_inputs[0] ,return_tensors="np" ).input_values
self.assertTrue(np.allclose(__lowerCAmelCase ,__lowerCAmelCase ,atol=1e-3 ) )
# Test batched
_lowerCamelCase : Tuple = feat_extract(__lowerCAmelCase ,return_tensors="np" ).input_values
_lowerCamelCase : Any = feat_extract(__lowerCAmelCase ,return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase ,__lowerCAmelCase ):
self.assertTrue(np.allclose(__lowerCAmelCase ,__lowerCAmelCase ,atol=1e-3 ) )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
_lowerCamelCase : str = ['''longest''', '''max_length''', '''do_not_pad''']
_lowerCamelCase : List[str] = [None, 1_600, None]
for max_length, padding in zip(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = feat_extract(__lowerCAmelCase ,padding=__lowerCAmelCase ,max_length=__lowerCAmelCase ,return_tensors="np" )
_lowerCamelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : Dict = range(800 ,1_400 ,200 )
_lowerCamelCase : str = [floats_list((1, x) )[0] for x in lengths]
_lowerCamelCase : Union[str, Any] = ['''longest''', '''max_length''', '''do_not_pad''']
_lowerCamelCase : List[Any] = [None, 1_600, None]
for max_length, padding in zip(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[str] = feat_extract(__lowerCAmelCase ,max_length=__lowerCAmelCase ,padding=__lowerCAmelCase )
_lowerCamelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : List[str] = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
_lowerCamelCase : str = feat_extract(
__lowerCAmelCase ,truncation=__lowerCAmelCase ,max_length=1_000 ,padding="max_length" ,return_tensors="np" )
_lowerCamelCase : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
_lowerCamelCase : Optional[int] = feat_extract(
__lowerCAmelCase ,truncation=__lowerCAmelCase ,max_length=1_000 ,padding="longest" ,return_tensors="np" )
_lowerCamelCase : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
_lowerCamelCase : int = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
_lowerCamelCase : Optional[Any] = feat_extract(
__lowerCAmelCase ,truncation=__lowerCAmelCase ,max_length=2_000 ,padding="longest" ,return_tensors="np" )
_lowerCamelCase : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : Dict = np.random.rand(100 ).astype(np.floataa )
_lowerCamelCase : List[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase : int = feature_extractor.pad([{"input_values": inputs}] ,return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_lowerCamelCase : Optional[int] = feature_extractor.pad([{"input_values": inputs}] ,return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase : int = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
_lowerCamelCase : int = [np.asarray(__lowerCAmelCase ) for speech_input in speech_inputs]
# Test feature size
_lowerCamelCase : Union[str, Any] = feature_extractor(audio_target=__lowerCAmelCase ,padding=__lowerCAmelCase ,return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
_lowerCamelCase : Tuple = feature_extractor(speech_inputs[0] ,return_tensors="np" ).input_values
_lowerCamelCase : int = feature_extractor(np_speech_inputs[0] ,return_tensors="np" ).input_values
self.assertTrue(np.allclose(__lowerCAmelCase ,__lowerCAmelCase ,atol=1e-3 ) )
# Test batched
_lowerCamelCase : Any = feature_extractor(__lowerCAmelCase ,return_tensors="np" ).input_values
_lowerCamelCase : Any = feature_extractor(__lowerCAmelCase ,return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase ,__lowerCAmelCase ):
self.assertTrue(np.allclose(__lowerCAmelCase ,__lowerCAmelCase ,atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_lowerCamelCase : Tuple = np.asarray(__lowerCAmelCase )
_lowerCamelCase : str = feature_extractor(__lowerCAmelCase ,return_tensors="np" ).input_values
_lowerCamelCase : Optional[int] = feature_extractor(__lowerCAmelCase ,return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase ,__lowerCAmelCase ):
self.assertTrue(np.allclose(__lowerCAmelCase ,__lowerCAmelCase ,atol=1e-3 ) )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
_lowerCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCamelCase : List[Any] = feat_extract.model_input_names[0]
_lowerCamelCase : Optional[Any] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__lowerCAmelCase ) == len(__lowerCAmelCase ) for x, y in zip(__lowerCAmelCase ,processed_features[input_name] ) ) )
_lowerCamelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__lowerCAmelCase )
_lowerCamelCase : Dict = BatchFeature({input_name: speech_inputs} ,tensor_type="np" )
_lowerCamelCase : Dict = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCamelCase : Any = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__lowerCAmelCase )
_lowerCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCamelCase : str = feat_extract.model_input_names[0]
_lowerCamelCase : Optional[Any] = BatchFeature({input_name: speech_inputs} ,tensor_type="pt" )
_lowerCamelCase : str = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCamelCase : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCamelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
_lowerCamelCase : Union[str, Any] = feat_extract.model_input_names[0]
_lowerCamelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
_lowerCamelCase : Dict = feat_extract.num_mel_bins # hack!
_lowerCamelCase : str = feat_extract.pad(__lowerCAmelCase ,padding="longest" ,return_tensors="np" )[input_name]
_lowerCamelCase : List[str] = feat_extract.pad(__lowerCAmelCase ,padding="longest" ,return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : str = self.feat_extract_dict
_lowerCamelCase : List[Any] = True
_lowerCamelCase : List[str] = self.feature_extraction_class(**__lowerCAmelCase )
_lowerCamelCase : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
_lowerCamelCase : List[Any] = [len(__lowerCAmelCase ) for x in speech_inputs]
_lowerCamelCase : List[str] = feat_extract.model_input_names[0]
_lowerCamelCase : str = BatchFeature({input_name: speech_inputs} )
_lowerCamelCase : int = feat_extract.num_mel_bins # hack!
_lowerCamelCase : Optional[Any] = feat_extract.pad(__lowerCAmelCase ,padding="longest" ,return_tensors="np" )
self.assertIn("attention_mask" ,__lowerCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Dict = self.feat_extract_dict
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : List[str] = self.feature_extraction_class(**__lowerCAmelCase )
_lowerCamelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
_lowerCamelCase : List[Any] = [len(__lowerCAmelCase ) for x in speech_inputs]
_lowerCamelCase : Optional[int] = feat_extract.model_input_names[0]
_lowerCamelCase : Optional[int] = BatchFeature({input_name: speech_inputs} )
_lowerCamelCase : Tuple = min(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = feat_extract.num_mel_bins # hack!
_lowerCamelCase : Dict = feat_extract.pad(
__lowerCAmelCase ,padding="max_length" ,max_length=__lowerCAmelCase ,truncation=__lowerCAmelCase ,return_tensors="np" )
self.assertIn("attention_mask" ,__lowerCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
def _lowercase ( self: Dict ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
from datasets import load_dataset
_lowerCamelCase : Optional[int] = load_dataset("hf-internal-testing/librispeech_asr_dummy" ,"clean" ,split="validation" )
# automatic decoding with librispeech
_lowerCamelCase : List[str] = ds.sort("id" ).select(range(__lowerCAmelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
# fmt: off
_lowerCamelCase : List[str] = torch.tensor(
[2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] )
# fmt: on
_lowerCamelCase : Union[str, Any] = self._load_datasamples(1 )
_lowerCamelCase : Optional[int] = SpeechTaFeatureExtractor()
_lowerCamelCase : Any = feature_extractor(__lowerCAmelCase ,return_tensors="pt" ).input_values
self.assertEquals(input_values.shape ,(1, 93_680) )
self.assertTrue(torch.allclose(input_values[0, :30] ,__lowerCAmelCase ,atol=1e-6 ) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
# fmt: off
_lowerCamelCase : Union[str, Any] = torch.tensor(
[-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
-3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
-3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
# fmt: on
_lowerCamelCase : Optional[Any] = self._load_datasamples(1 )
_lowerCamelCase : List[str] = SpeechTaFeatureExtractor()
_lowerCamelCase : List[str] = feature_extractor(audio_target=__lowerCAmelCase ,return_tensors="pt" ).input_values
self.assertEquals(input_values.shape ,(1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] ,__lowerCAmelCase ,atol=1e-4 ) ) | 352 |
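# The `_check_zero_mean_unit_variance` assertions above verify per-feature
# normalization of the padded values; a standalone numpy sketch of that check:
import numpy as np

def is_normalized(values, tol=1e-3):
    # Mean close to 0 and variance close to 1 along the time axis.
    return bool(
        np.all(np.abs(values.mean(axis=0)) < tol)
        and np.all(np.abs(values.var(axis=0) - 1) < tol)
    )

signal = np.random.randn(1_000, 4)
signal = (signal - signal.mean(axis=0)) / np.sqrt(signal.var(axis=0))
assert is_normalized(signal)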
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class A_ ( _a ):
def __init__( self: List[Any] ,__lowerCAmelCase: Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
'''simple docstring'''
super().__init__()
_lowerCamelCase : Tuple = nn.ModuleList(__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Union[torch.Tensor, float, int] ,__lowerCAmelCase: torch.Tensor ,__lowerCAmelCase: List[torch.tensor] ,__lowerCAmelCase: List[float] ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[Dict[str, Any]] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(__lowerCAmelCase ,__lowerCAmelCase ,self.nets ) ):
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = controlnet(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,)
# merge samples
if i == 0:
_lowerCamelCase, _lowerCamelCase : Optional[Any] = down_samples, mid_sample
else:
_lowerCamelCase : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(__lowerCAmelCase ,__lowerCAmelCase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Union[str, os.PathLike] ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Callable = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[str] = None ,):
'''simple docstring'''
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : str = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
__lowerCAmelCase ,is_main_process=__lowerCAmelCase ,save_function=__lowerCAmelCase ,safe_serialization=__lowerCAmelCase ,variant=__lowerCAmelCase ,)
idx += 1
_lowerCamelCase : int = model_path_to_save + F"""_{idx}"""
@classmethod
def _lowercase ( cls: Any ,__lowerCAmelCase: Optional[Union[str, os.PathLike]] ,**__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : int = 0
_lowerCamelCase : str = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_lowerCamelCase : Dict = pretrained_model_path
while os.path.isdir(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = ControlNetModel.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase )
controlnets.append(__lowerCAmelCase )
idx += 1
_lowerCamelCase : Tuple = pretrained_model_path + F"""_{idx}"""
logger.info(F"""{len(__lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.""" )
if len(__lowerCAmelCase ) == 0:
raise ValueError(
F"""No ControlNets found under {os.path.dirname(__lowerCAmelCase )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(__lowerCAmelCase ) | 340 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A_ ( _lowerCamelCase ):
lowerCAmelCase__ = (UniPCMultistepScheduler,)
lowerCAmelCase__ = (('num_inference_steps', 2_5),)
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = {
"num_train_timesteps": 1_000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__lowerCAmelCase )
return config
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Any=0 ,**__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = dict(self.forward_default_kwargs )
_lowerCamelCase : Optional[Any] = kwargs.pop("num_inference_steps" ,__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.dummy_sample
_lowerCamelCase : Optional[int] = 0.1 * sample
_lowerCamelCase : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCamelCase : int = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCamelCase : str = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals
_lowerCamelCase : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = scheduler_class.from_pretrained(__lowerCAmelCase )
new_scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals
_lowerCamelCase : List[str] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = sample, sample
for t in range(__lowerCAmelCase ,time_step + scheduler.config.solver_order + 1 ):
_lowerCamelCase : Optional[int] = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,**__lowerCAmelCase ).prev_sample
_lowerCamelCase : Tuple = new_scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,**__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _lowercase ( self: str ,__lowerCAmelCase: Optional[Any]=0 ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = dict(self.forward_default_kwargs )
_lowerCamelCase : List[str] = kwargs.pop("num_inference_steps" ,__lowerCAmelCase )
_lowerCamelCase : str = self.dummy_sample
_lowerCamelCase : str = 0.1 * sample
_lowerCamelCase : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCamelCase : Dict = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCamelCase : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCAmelCase )
_lowerCamelCase : List[Any] = scheduler_class.from_pretrained(__lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
_lowerCamelCase : Any = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCamelCase : Optional[Any] = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,**__lowerCAmelCase ).prev_sample
_lowerCamelCase : str = new_scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,**__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _lowercase ( self: Dict ,__lowerCAmelCase: str=None ,**__lowerCAmelCase: int ):
'''simple docstring'''
if scheduler is None:
_lowerCamelCase : Dict = self.scheduler_classes[0]
_lowerCamelCase : List[str] = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCamelCase : Any = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase : Tuple = 10
_lowerCamelCase : int = self.dummy_model()
_lowerCamelCase : Any = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_lowerCamelCase : str = model(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Optional[int] = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample
return sample
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = dict(self.forward_default_kwargs )
_lowerCamelCase : List[str] = kwargs.pop("num_inference_steps" ,__lowerCAmelCase )
for scheduler_class in self.scheduler_classes:
_lowerCamelCase : List[str] = self.get_scheduler_config()
_lowerCamelCase : Tuple = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.dummy_sample
_lowerCamelCase : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowerCAmelCase ,"set_timesteps" ):
scheduler.set_timesteps(__lowerCAmelCase )
elif num_inference_steps is not None and not hasattr(__lowerCAmelCase ,"set_timesteps" ):
_lowerCamelCase : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCamelCase : Any = [residual + 0.2, residual + 0.15, residual + 0.10]
_lowerCamelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCamelCase : Dict = scheduler.timesteps[5]
_lowerCamelCase : int = scheduler.timesteps[6]
_lowerCamelCase : str = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,**__lowerCAmelCase ).prev_sample
_lowerCamelCase : str = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,**__lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : List[str] = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCamelCase : Dict = self.full_loop(scheduler=__lowerCAmelCase )
_lowerCamelCase : List[str] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.24_64 ) < 1e-3
_lowerCamelCase : Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCamelCase : Union[str, Any] = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCamelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCamelCase : List[str] = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCamelCase : Tuple = self.full_loop(scheduler=__lowerCAmelCase )
_lowerCamelCase : Any = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.24_64 ) < 1e-3
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.check_over_configs(thresholding=__lowerCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__lowerCAmelCase ,prediction_type=__lowerCAmelCase ,sample_max_value=__lowerCAmelCase ,solver_order=__lowerCAmelCase ,solver_type=__lowerCAmelCase ,)
def _lowercase ( self: int ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__lowerCAmelCase ,solver_type=__lowerCAmelCase ,prediction_type=__lowerCAmelCase ,)
_lowerCamelCase : Any = self.full_loop(
solver_order=__lowerCAmelCase ,solver_type=__lowerCAmelCase ,prediction_type=__lowerCAmelCase ,)
assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers"
def _lowercase ( self: Any ):
'''simple docstring'''
self.check_over_configs(lower_order_final=__lowerCAmelCase )
self.check_over_configs(lower_order_final=__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=__lowerCAmelCase ,time_step=0 )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.full_loop()
_lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.24_64 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Any = self.full_loop(prediction_type="v_prediction" )
_lowerCamelCase : Any = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.10_14 ) < 1e-3
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config(thresholding=__lowerCAmelCase ,dynamic_thresholding_ratio=0 )
_lowerCamelCase : Optional[Any] = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase : List[Any] = 10
_lowerCamelCase : Any = self.dummy_model()
_lowerCamelCase : str = self.dummy_sample_deter.half()
scheduler.set_timesteps(__lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Any ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCamelCase : int = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCamelCase : Tuple = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps | 353 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : int = logging.get_logger(__name__)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Tuple = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head" ):
_lowerCamelCase : Tuple = "segformer.encoder." + key
if key.startswith("backbone" ):
_lowerCamelCase : Any = key.replace("backbone" , "segformer.encoder" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_lowerCamelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )]
_lowerCamelCase : int = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_lowerCamelCase )-1}""" )
if "norm" in key:
_lowerCamelCase : Optional[Any] = key.replace("norm" , "layer_norm" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_lowerCamelCase : Dict = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
_lowerCamelCase : Tuple = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_lowerCamelCase )-1}""" )
if "layer_norm1" in key:
_lowerCamelCase : Union[str, Any] = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
_lowerCamelCase : int = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
_lowerCamelCase : Union[str, Any] = key[key.find("block" ) + len("block" )]
_lowerCamelCase : Optional[Any] = key.replace(F"""block{idx}""" , F"""block.{int(_lowerCamelCase )-1}""" )
if "attn.q" in key:
_lowerCamelCase : Optional[int] = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
_lowerCamelCase : List[str] = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
_lowerCamelCase : Tuple = key.replace("attn" , "attention.self" )
if "fc1" in key:
_lowerCamelCase : Optional[Any] = key.replace("fc1" , "dense1" )
if "fc2" in key:
_lowerCamelCase : Dict = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
_lowerCamelCase : int = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
_lowerCamelCase : str = key.replace("linear_fuse.conv" , "linear_fuse" )
_lowerCamelCase : Optional[Any] = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_lowerCamelCase : Union[str, Any] = key[key.find("linear_c" ) + len("linear_c" )]
_lowerCamelCase : Optional[int] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_lowerCamelCase )-1}""" )
if key.startswith("head" ):
_lowerCamelCase : List[str] = key.replace("head" , "classifier" )
_lowerCamelCase : Union[str, Any] = value
return new_state_dict
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
_lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
_lowerCamelCase : int = kv_weight[
: config.hidden_sizes[i], :
]
_lowerCamelCase : int = kv_bias[: config.hidden_sizes[i]]
_lowerCamelCase : Optional[int] = kv_weight[
config.hidden_sizes[i] :, :
]
_lowerCamelCase : Optional[Any] = kv_bias[
config.hidden_sizes[i] :
]
def lowerCamelCase_( ) -> Dict:
'''simple docstring'''
_lowerCamelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : Any = SegformerConfig()
_lowerCamelCase : int = False
# set attributes based on model_name
_lowerCamelCase : Any = "huggingface/label-files"
if "segformer" in model_name:
_lowerCamelCase : str = model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
_lowerCamelCase : str = 150
_lowerCamelCase : Dict = "ade20k-id2label.json"
_lowerCamelCase : Dict = (1, 150, 128, 128)
elif "city" in model_name:
_lowerCamelCase : List[str] = 19
_lowerCamelCase : Tuple = "cityscapes-id2label.json"
_lowerCamelCase : Tuple = (1, 19, 128, 128)
else:
raise ValueError(F"""Model {model_name} not supported""" )
elif "mit" in model_name:
_lowerCamelCase : List[str] = True
_lowerCamelCase : Tuple = model_name[4:6]
_lowerCamelCase : Tuple = 1000
_lowerCamelCase : List[Any] = "imagenet-1k-id2label.json"
_lowerCamelCase : List[Any] = (1, 1000)
else:
raise ValueError(F"""Model {model_name} not supported""" )
# set config attributes
_lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = idalabel
_lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
_lowerCamelCase : int = [64, 128, 320, 512]
_lowerCamelCase : int = 256
elif size == "b2":
_lowerCamelCase : Tuple = [64, 128, 320, 512]
_lowerCamelCase : List[Any] = 768
_lowerCamelCase : Any = [3, 4, 6, 3]
elif size == "b3":
_lowerCamelCase : Tuple = [64, 128, 320, 512]
_lowerCamelCase : Union[str, Any] = 768
_lowerCamelCase : Optional[Any] = [3, 4, 18, 3]
elif size == "b4":
_lowerCamelCase : str = [64, 128, 320, 512]
_lowerCamelCase : Optional[Any] = 768
_lowerCamelCase : Dict = [3, 8, 27, 3]
elif size == "b5":
_lowerCamelCase : int = [64, 128, 320, 512]
_lowerCamelCase : Tuple = 768
_lowerCamelCase : Tuple = [3, 6, 40, 3]
else:
raise ValueError(F"""Size {size} not supported""" )
# load image processor (only resize + normalize)
_lowerCamelCase : Dict = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase )
# prepare image
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values
logger.info(F"""Converting model {model_name}...""" )
# load original state dict
if encoder_only:
_lowerCamelCase : Tuple = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )
else:
_lowerCamelCase : int = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
_lowerCamelCase : str = rename_keys(_lowerCamelCase , encoder_only=_lowerCamelCase )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(_lowerCamelCase , _lowerCamelCase )
# create HuggingFace model and load state dict
if encoder_only:
_lowerCamelCase : Tuple = False
_lowerCamelCase : Optional[int] = SegformerForImageClassification(_lowerCamelCase )
else:
_lowerCamelCase : List[str] = SegformerForSemanticSegmentation(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
# forward pass
_lowerCamelCase : Any = model(_lowerCamelCase )
_lowerCamelCase : Dict = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
_lowerCamelCase : str = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
_lowerCamelCase : Any = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
_lowerCamelCase : int = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
_lowerCamelCase : Optional[Any] = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
_lowerCamelCase : List[str] = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
_lowerCamelCase : Any = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
_lowerCamelCase : Dict = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
_lowerCamelCase : Optional[int] = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
_lowerCamelCase : Tuple = torch.tensor(
[
[
[-1.13_72e01, -1.27_87e01, -1.34_77e01],
[-1.25_36e01, -1.41_94e01, -1.44_09e01],
[-1.32_17e01, -1.48_88e01, -1.53_27e01],
],
[
[-1.47_91e01, -1.71_22e01, -1.82_77e01],
[-1.71_63e01, -1.91_92e01, -1.95_33e01],
[-1.78_97e01, -1.99_91e01, -2.03_15e01],
],
[
[7.67_23e-01, 4.19_21e-01, -7.78_78e-02],
[4.77_72e-01, 9.55_57e-03, -2.80_82e-01],
[3.60_32e-01, -2.48_26e-01, -5.11_68e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
_lowerCamelCase : Union[str, Any] = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
_lowerCamelCase : List[Any] = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
_lowerCamelCase : Tuple = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
_lowerCamelCase : Any = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
_lowerCamelCase : List[str] = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
_lowerCamelCase : str = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
else:
_lowerCamelCase : Dict = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-2 )
# finally, save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_lowerCAmelCase : str = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 340 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class A_ ( lowerCamelCase__ ):
lowerCAmelCase__ = 'gpt_neo'
lowerCAmelCase__ = ['past_key_values']
lowerCAmelCase__ = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self: Tuple ,__lowerCAmelCase: List[Any]=50_257 ,__lowerCAmelCase: Optional[Any]=2_048 ,__lowerCAmelCase: List[str]=2_048 ,__lowerCAmelCase: Any=24 ,__lowerCAmelCase: Tuple=[[["global", "local"], 12]] ,__lowerCAmelCase: Union[str, Any]=16 ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: int=256 ,__lowerCAmelCase: List[Any]="gelu_new" ,__lowerCAmelCase: int=0.0 ,__lowerCAmelCase: Any=0.0 ,__lowerCAmelCase: Tuple=0.0 ,__lowerCAmelCase: Any=0.1 ,__lowerCAmelCase: List[str]=1e-5 ,__lowerCAmelCase: Any=0.02 ,__lowerCAmelCase: Union[str, Any]=True ,__lowerCAmelCase: int=50_256 ,__lowerCAmelCase: Optional[Any]=50_256 ,**__lowerCAmelCase: int ,):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = vocab_size
_lowerCamelCase : Dict = max_position_embeddings
_lowerCamelCase : str = hidden_size
_lowerCamelCase : Optional[int] = num_layers
_lowerCamelCase : Any = num_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Dict = window_size
_lowerCamelCase : Union[str, Any] = activation_function
_lowerCamelCase : Dict = resid_dropout
_lowerCamelCase : int = embed_dropout
_lowerCamelCase : List[Any] = attention_dropout
_lowerCamelCase : str = classifier_dropout
_lowerCamelCase : List[str] = layer_norm_epsilon
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : Dict = use_cache
_lowerCamelCase : str = bos_token_id
_lowerCamelCase : Optional[int] = eos_token_id
_lowerCamelCase : Dict = attention_types
_lowerCamelCase : int = self.expand_attention_types_params(__lowerCAmelCase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
F"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
F"""`config.num_layers = {self.num_layers}`. """
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase )
@staticmethod
def _lowercase ( __lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
import torch
_lowerCamelCase : Optional[int] = input.size()
_lowerCamelCase : List[Any] = len(_lowerCAmelCase )
_lowerCamelCase : Optional[int] = shape[dimension]
_lowerCamelCase : Tuple = torch.arange(0 , _lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : str = torch.div(sizedim - size , _lowerCAmelCase , rounding_mode="floor" ) + 1
_lowerCamelCase : int = torch.arange(_lowerCAmelCase ) + low_indices[:min_length][:, None]
_lowerCamelCase : str = [slice(_lowerCAmelCase )] * rank
_lowerCamelCase : Union[str, Any] = indices
_lowerCamelCase : Tuple = input[s]
_lowerCamelCase : int = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(_lowerCAmelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
import torch
_lowerCamelCase : Dict = torch.arange(1 , _lowerCAmelCase )
_lowerCamelCase : int = torch.remainder(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = remainders == 0
_lowerCamelCase : Dict = candidates[divisor_indices]
_lowerCamelCase : Optional[Any] = torch.max(_lowerCAmelCase )
return largest_divisor, torch.div(_lowerCAmelCase , _lowerCAmelCase , rounding_mode="floor" )
class A_ ( lowerCamelCase__ ):
@property
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(__lowerCAmelCase ,direction="inputs" )
_lowerCamelCase : int = {0: "batch", 1: "past_sequence + sequence"}
else:
_lowerCamelCase : str = {0: "batch", 1: "sequence"}
return common_inputs
@property
def _lowercase ( self: Tuple ):
'''simple docstring'''
return self._config.num_heads
def _lowercase ( self: List[str] ,__lowerCAmelCase: PreTrainedTokenizer ,__lowerCAmelCase: int = -1 ,__lowerCAmelCase: int = -1 ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[TensorType] = None ,):
'''simple docstring'''
_lowerCamelCase : List[Any] = super(__lowerCAmelCase ,self ).generate_dummy_inputs(
__lowerCAmelCase ,batch_size=__lowerCAmelCase ,seq_length=__lowerCAmelCase ,is_pair=__lowerCAmelCase ,framework=__lowerCAmelCase )
# We need to order the input in the way they appears in the forward()
_lowerCamelCase : str = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCamelCase : Union[str, Any] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_lowerCamelCase : Tuple = seqlen + 2
_lowerCamelCase : int = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_lowerCamelCase : Union[str, Any] = [
(torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) for _ in range(self.num_layers )
]
_lowerCamelCase : Optional[Any] = common_inputs["attention_mask"]
if self.use_past:
_lowerCamelCase : Optional[Any] = ordered_inputs["attention_mask"].dtype
_lowerCamelCase : str = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__lowerCAmelCase ,__lowerCAmelCase ,dtype=__lowerCAmelCase )] ,dim=1 )
return ordered_inputs
@property
def _lowercase ( self: Dict ):
'''simple docstring'''
return 13 | 354 |
"""simple docstring"""
_lowerCAmelCase : dict[tuple[int, int, int], int] = {}
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
_lowerCamelCase : Optional[int] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
_lowerCamelCase : int = _calculate(days - 1 , _lowerCamelCase , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
_lowerCamelCase : Tuple = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
_lowerCamelCase : str = _calculate(days - 1 , _lowerCamelCase , 0 )
_lowerCamelCase : List[Any] = state_late + state_absent + state_ontime
_lowerCamelCase : int = prizestrings
return prizestrings
def lowerCamelCase_( _lowerCamelCase = 30 ) -> int:
'''simple docstring'''
return _calculate(_lowerCamelCase , absent=0 , late=0 )
if __name__ == "__main__":
print(solution()) | 340 | 0 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase ) -> bool:
'''simple docstring'''
if not all(x.isalpha() for x in string ):
raise ValueError("String must only contain alphabetic characters." )
_lowerCamelCase : str = sorted(string.lower() )
return len(_lowerCamelCase ) == len(set(_lowerCamelCase ) )
if __name__ == "__main__":
_lowerCAmelCase : int = input('''Enter a string ''').strip()
_lowerCAmelCase : Dict = is_isogram(input_str)
print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''') | 355 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_( _lowerCamelCase ) -> bool:
'''simple docstring'''
_lowerCamelCase : int = str(_lowerCamelCase )
return len(_lowerCamelCase ) == 9 and set(_lowerCamelCase ) == set("123456789" )
def lowerCamelCase_( ) -> int | None:
'''simple docstring'''
for base_num in range(9999 , 4999 , -1 ):
_lowerCamelCase : Union[str, Any] = 100002 * base_num
if is_9_pandigital(_lowerCamelCase ):
return candidate
for base_num in range(333 , 99 , -1 ):
_lowerCamelCase : Tuple = 1002003 * base_num
if is_9_pandigital(_lowerCamelCase ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''') | 340 | 0 |
"""simple docstring"""
import unittest
import numpy as np
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase : Dict = np.shape(__UpperCAmelCase )
_lowerCamelCase : Optional[Any] = np.shape(__UpperCAmelCase )
_lowerCamelCase : Dict = np.shape(__UpperCAmelCase )
if shape_a[0] != shape_b[0]:
_lowerCamelCase : Union[str, Any] = (
"Expected the same number of rows for A and B. "
F"""Instead found A of size {shape_a} and B of size {shape_b}"""
)
raise ValueError(__UpperCAmelCase )
if shape_b[1] != shape_c[1]:
_lowerCamelCase : Dict = (
"Expected the same number of columns for B and C. "
F"""Instead found B of size {shape_b} and C of size {shape_c}"""
)
raise ValueError(__UpperCAmelCase )
_lowerCamelCase : int = pseudo_inv
if a_inv is None:
try:
_lowerCamelCase : Dict = np.linalg.inv(__UpperCAmelCase )
except np.linalg.LinAlgError:
raise ValueError(
"Input matrix A is not invertible. Cannot compute Schur complement." )
return mat_c - mat_b.T @ a_inv @ mat_b
class A_ ( unittest.TestCase ):
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : str = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : Tuple = np.array([[2, 1], [6, 3]] )
_lowerCamelCase : Union[str, Any] = schur_complement(lowercase_ ,lowercase_ ,lowercase_ )
_lowerCamelCase : List[Any] = np.block([[a, b], [b.T, c]] )
_lowerCamelCase : List[str] = np.linalg.det(lowercase_ )
_lowerCamelCase : int = np.linalg.det(lowercase_ )
_lowerCamelCase : List[Any] = np.linalg.det(lowercase_ )
self.assertAlmostEqual(lowercase_ ,det_a * det_s )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Dict = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : Dict = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : Optional[int] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ ,lowercase_ ,lowercase_ )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ ,lowercase_ ,lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main() | 356 |
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class A_ ( _a ):
lowerCAmelCase__ = 'char'
lowerCAmelCase__ = 'bpe'
lowerCAmelCase__ = 'wp'
_lowerCAmelCase : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class A_ ( _a ):
lowerCAmelCase__ = ['image_processor', 'char_tokenizer']
lowerCAmelCase__ = 'ViTImageProcessor'
lowerCAmelCase__ = 'MgpstrTokenizer'
def __init__( self: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Optional[int]=None ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." ,__lowerCAmelCase ,)
_lowerCamelCase : List[Any] = kwargs.pop("feature_extractor" )
_lowerCamelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
_lowerCamelCase : List[str] = tokenizer
_lowerCamelCase : str = AutoTokenizer.from_pretrained("gpt2" )
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__lowerCAmelCase ,__lowerCAmelCase )
def __call__( self: Optional[int] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[Any]=None ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
_lowerCamelCase : Optional[int] = self.image_processor(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
if text is not None:
_lowerCamelCase : int = self.char_tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_lowerCamelCase : Tuple = encodings["input_ids"]
return inputs
def _lowercase ( self: int ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = sequences
_lowerCamelCase : Dict = char_preds.size(0 )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self._decode_helper(__lowerCAmelCase ,"char" )
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self._decode_helper(__lowerCAmelCase ,"bpe" )
_lowerCamelCase, _lowerCamelCase : Tuple = self._decode_helper(__lowerCAmelCase ,"wp" )
_lowerCamelCase : List[str] = []
_lowerCamelCase : str = []
for i in range(__lowerCAmelCase ):
_lowerCamelCase : str = [char_scores[i], bpe_scores[i], wp_scores[i]]
_lowerCamelCase : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]]
_lowerCamelCase : Optional[Any] = scores.index(max(__lowerCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = final_strs
_lowerCamelCase : int = final_scores
_lowerCamelCase : str = char_strs
_lowerCamelCase : Dict = bpe_strs
_lowerCamelCase : int = wp_strs
return out
def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
_lowerCamelCase : int = self.char_decode
_lowerCamelCase : List[str] = 1
_lowerCamelCase : Optional[int] = "[s]"
elif format == DecodeType.BPE:
_lowerCamelCase : Dict = self.bpe_decode
_lowerCamelCase : str = 2
_lowerCamelCase : Union[str, Any] = "#"
elif format == DecodeType.WORDPIECE:
_lowerCamelCase : int = self.wp_decode
_lowerCamelCase : List[str] = 102
_lowerCamelCase : List[Any] = "[SEP]"
else:
raise ValueError(F"""Format {format} is not supported.""" )
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = [], []
_lowerCamelCase : Any = pred_logits.size(0 )
_lowerCamelCase : int = pred_logits.size(1 )
_lowerCamelCase, _lowerCamelCase : List[Any] = pred_logits.topk(1 ,dim=-1 ,largest=__lowerCAmelCase ,sorted=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = preds_index.view(-1 ,__lowerCAmelCase )[:, 1:]
_lowerCamelCase : List[str] = decoder(__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : str = torch.nn.functional.softmax(__lowerCAmelCase ,dim=2 ).max(dim=2 )
_lowerCamelCase : Any = preds_max_prob[:, 1:]
for index in range(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = preds_str[index].find(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = preds_str[index][:pred_eos]
_lowerCamelCase : Optional[Any] = preds_index[index].cpu().tolist()
_lowerCamelCase : List[str] = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1
_lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1]
_lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__lowerCAmelCase )
conf_scores.append(__lowerCAmelCase )
return dec_strs, conf_scores
def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : str = [seq.replace(" " ,"" ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )]
return decode_strs
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__lowerCAmelCase )
def _lowercase ( self: Tuple ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = [seq.replace(" " ,"" ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )]
return decode_strs | 340 | 0 |
"""simple docstring"""
_lowerCAmelCase : List[str] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Any = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
number //= 100000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
_lowerCAmelCase : list[bool | None] = [None] * 1000_0000
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : Optional[int] = False
def lowerCamelCase_( _lowerCamelCase ) -> bool:
'''simple docstring'''
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
_lowerCamelCase : Union[str, Any] = chain(next_number(lowercase_ ) )
_lowerCamelCase : Union[str, Any] = number_chain
while number < 10000000:
_lowerCamelCase : Tuple = number_chain
number *= 10
return number_chain
def lowerCamelCase_( _lowerCamelCase = 10000000 ) -> int:
'''simple docstring'''
for i in range(1 , lowercase_ ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution() = }''') | 357 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_lowerCAmelCase : List[Any] = get_logger(__name__)
class A_ :
lowerCAmelCase__ = 'dummy_data'
lowerCAmelCase__ = 'datasets'
lowerCAmelCase__ = False
def __init__( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[Version, str] ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[List[Callable]] = None ,):
'''simple docstring'''
_lowerCamelCase : str = 0
_lowerCamelCase : List[str] = dataset_name
_lowerCamelCase : Optional[int] = cache_dir
_lowerCamelCase : Optional[int] = use_local_dummy_data
_lowerCamelCase : int = config
# download_callbacks take a single url as input
_lowerCamelCase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_lowerCamelCase : int = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_lowerCamelCase : Tuple = str(__lowerCAmelCase )
# to be downloaded
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Dict = None
@property
def _lowercase ( self: str ):
'''simple docstring'''
if self._dummy_file is None:
_lowerCamelCase : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def _lowercase ( self: str ):
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" ,self.config.name ,self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" ,self.version_name )
@property
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
return os.path.join(self.dummy_data_folder ,"dummy_data.zip" )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_lowerCamelCase : Optional[int] = cached_path(
__lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=__lowerCAmelCase ,force_extract=__lowerCAmelCase )
return os.path.join(__lowerCAmelCase ,self.dummy_file_name )
@property
def _lowercase ( self: Tuple ):
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file )
@property
def _lowercase ( self: List[str] ):
'''simple docstring'''
if self._bucket_url is None:
_lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) )
return self._bucket_url
@property
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ):
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_lowerCamelCase : Tuple = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_lowerCamelCase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,(list, tuple) ):
return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase )
else:
return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return self.download_and_extract(__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ):
'''simple docstring'''
return self.download_and_extract(__lowerCAmelCase )
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return path
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return {}
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : str = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
for single_url in single_urls:
download_callback(__lowerCAmelCase )
else:
_lowerCamelCase : Union[str, Any] = single_urls
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) for x in single_urls]
else:
_lowerCamelCase : Union[str, Any] = single_urls
_lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) )
_lowerCamelCase : List[Any] = value
# make sure that values are unique
if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_lowerCamelCase : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,__lowerCAmelCase ) ) for url in data_url )
_lowerCamelCase : Optional[Any] = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_lowerCamelCase : Tuple = [data_url[0]] * len(__lowerCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(__lowerCAmelCase )
return dummy_data_list
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(__lowerCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
pass
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
def _iter_archive_members(__lowerCAmelCase: Any ):
# this preserves the order of the members inside the ZIP archive
_lowerCamelCase : Tuple = Path(self.dummy_file ).parent
_lowerCamelCase : str = path.relative_to(__lowerCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_lowerCamelCase : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = Path(__lowerCAmelCase )
_lowerCamelCase : int = _iter_archive_members(__lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(__lowerCAmelCase ).as_posix(), file_path.open("rb" )
    def iter_files(self, paths):
        '''simple docstring'''
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
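# Minimal, self-contained sketch (an addition) of the URL-to-dummy-filename
# convention used above: each URL is keyed by its last path segment,
# percent-encoded with urllib.parse.quote_plus. The URL below is hypothetical.
def _dummy_path_for_url(base_dir, url):
    import os
    import urllib.parse

    return os.path.join(base_dir, urllib.parse.quote_plus(url.split("/")[-1]))


# _dummy_path_for_url("dummy", "https://example.com/data/train.txt?rev=2")
#   -> "dummy/train.txt%3Frev%3D2"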
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        '''simple docstring'''
        self.graph = {}

    # adding vertices and edges; the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        '''simple docstring'''
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        '''simple docstring'''
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        '''simple docstring'''
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    # if no destination is meant, the default value -1 is used
    def dfs(self, s=-2, d=-1):
        '''simple docstring'''
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        '''simple docstring'''
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has at most 102 outgoing edges here
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        '''simple docstring'''
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        '''simple docstring'''
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        '''simple docstring'''
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        '''simple docstring'''
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        '''simple docstring'''
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
                else:
                    on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        '''simple docstring'''
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
                else:
                    on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        '''simple docstring'''
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        '''simple docstring'''
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
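# A compact, runnable cross-check (an addition, not part of the original
# module) for directed-cycle detection, using the classic three-color DFS.
# It reads the same adjacency format as the class above: edges stored as
# [weight, vertex] lists in a dict.
def digraph_has_cycle(adj):
    white, gray, black = 0, 1, 2
    color = {u: white for u in adj}

    def visit(u):
        color[u] = gray
        for _, v in adj.get(u, []):
            if color.get(v, white) == gray:
                return True  # back edge found: a cycle exists
            if color.get(v, white) == white and visit(v):
                return True
        color[u] = black
        return False

    return any(color[u] == white and visit(u) for u in list(color))


# e.g. digraph_has_cycle({0: [[1, 1]], 1: [[1, 2]], 2: [[1, 0]]}) -> True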
class Graph:
    def __init__(self):
        '''simple docstring'''
        self.graph = {}

    # adding vertices and edges; the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        '''simple docstring'''
        if self.graph.get(u):
            # if there is already an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there is already an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        '''simple docstring'''
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    # if no destination is meant, the default value -1 is used
    def dfs(self, s=-2, d=-1):
        '''simple docstring'''
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        '''simple docstring'''
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has at most 102 edges here
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        '''simple docstring'''
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        '''simple docstring'''
        return len(self.graph[u])

    def cycle_nodes(self):
        '''simple docstring'''
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
                else:
                    on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        '''simple docstring'''
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
                else:
                    on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        '''simple docstring'''
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        '''simple docstring'''
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        '''simple docstring'''
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
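# Quick usage sketch for the two classes above (an illustration added here;
# the output comments reflect the traversal order for this small example):
if __name__ == "__main__":
    dg = DirectedGraph()
    dg.add_pair(0, 1)
    dg.add_pair(1, 2)
    dg.add_pair(2, 0)
    print(dg.all_nodes())   # [0, 1, 2]
    print(dg.dfs(0))        # [0, 1, 2]
    print(dg.bfs(0))        # [0, 1, 2]
    print(dg.in_degree(0))  # 1 -- only the edge 2 -> 0
    ug = Graph()
    ug.add_pair("a", "b")
    print(ug.degree("a"))   # 1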
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    '''simple docstring'''
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    # the Chudnovsky series adds roughly 14 digits of precision per term
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi are: {pi(n)}")
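    # quick self-check (an addition for illustration): the 50-character result
    # should match this hard-coded reference prefix of pi
    assert pi(50) == "3.141592653589793238462643383279502884197169399375"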
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : str = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class DPRConfig(PretrainedConfig):
lowerCAmelCase__ = 'dpr'
def __init__( self: List[Any] ,__lowerCAmelCase: List[str]=30_522 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Optional[Any]=12 ,__lowerCAmelCase: Any=12 ,__lowerCAmelCase: Optional[Any]=3_072 ,__lowerCAmelCase: Union[str, Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Dict=512 ,__lowerCAmelCase: int=2 ,__lowerCAmelCase: Dict=0.02 ,__lowerCAmelCase: str=1e-12 ,__lowerCAmelCase: str=0 ,__lowerCAmelCase: List[str]="absolute" ,__lowerCAmelCase: int = 0 ,**__lowerCAmelCase: Dict ,):
'''simple docstring'''
super().__init__(pad_token_id=__A ,**__A )
_lowerCamelCase : int = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : Dict = intermediate_size
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Dict = type_vocab_size
_lowerCamelCase : List[str] = initializer_range
_lowerCamelCase : Dict = layer_norm_eps
_lowerCamelCase : Union[str, Any] = projection_dim
        _lowerCamelCase : str = position_embedding_type
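# Hedged usage sketch (an addition): the class above mirrors transformers'
# DPRConfig, so an equivalent config can be built and round-tripped as below.
# This assumes the `transformers` package is installed and uses its upstream
# class rather than the partially de-obfuscated one above.
def _dpr_config_roundtrip_demo(tmp_dir="./dpr-config-demo"):
    from transformers import DPRConfig as UpstreamDPRConfig

    config = UpstreamDPRConfig(hidden_size=768, num_hidden_layers=12, projection_dim=0)
    config.save_pretrained(tmp_dir)  # writes config.json into tmp_dir
    reloaded = UpstreamDPRConfig.from_pretrained(tmp_dir)
    assert reloaded.hidden_size == config.hidden_size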
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    '''simple docstring'''
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
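# Small runnable illustration (an addition) of the "squaredcos_cap_v2" cosine
# schedule implemented above, in pure Python: each beta comes from the ratio of
# the cumulative-alpha function at consecutive timesteps, capped at max_beta.
def _cosine_betas_preview(num_steps=1_000, max_beta=0.999):
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    return [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]

# _cosine_betas_preview()[0] is roughly 4e-5, betas grow along the schedule,
# and the final beta is capped at 0.999.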
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
@register_to_config
def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,):
'''simple docstring'''
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
_lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = 1.0 - self.betas
_lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 )
_lowerCamelCase : int = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
_lowerCamelCase : Tuple = 1.0
# setable values
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() )
_lowerCamelCase : List[str] = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
'''simple docstring'''
return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
'''simple docstring'''
_lowerCamelCase : str = num_inference_steps
_lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
_lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
_lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase )
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
'''simple docstring'''
if prev_timestep is None:
_lowerCamelCase : List[str] = t - 1
_lowerCamelCase : Optional[int] = self.alphas_cumprod[t]
_lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : Dict = 1 - alpha_prod_t
_lowerCamelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : List[Any] = self.betas[t]
else:
_lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
_lowerCamelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
_lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) )
_lowerCamelCase : str = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
_lowerCamelCase : str = variance.log()
_lowerCamelCase : str = beta.log()
_lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2
_lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, prev_timestep: Optional[int] = None, generator=None, return_dict: bool = True):
'''simple docstring'''
_lowerCamelCase : str = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
_lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 )
else:
_lowerCamelCase : List[Any] = None
# 1. compute alphas, betas
if prev_timestep is None:
_lowerCamelCase : List[Any] = t - 1
_lowerCamelCase : Dict = self.alphas_cumprod[t]
_lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : Dict = 1 - alpha_prod_t
_lowerCamelCase : List[str] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : Any = self.betas[t]
_lowerCamelCase : str = self.alphas[t]
else:
_lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev
_lowerCamelCase : Optional[Any] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_lowerCamelCase : List[Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_lowerCamelCase : Any = torch.clamp(
__lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
_lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_lowerCamelCase : Union[str, Any] = 0
if t > 0:
_lowerCamelCase : Dict = randn_tensor(
model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device )
_lowerCamelCase : Any = self._get_variance(
__lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,)
if self.variance_type == "fixed_small_log":
_lowerCamelCase : Optional[Any] = variance
elif self.variance_type == "learned_range":
_lowerCamelCase : Optional[int] = (0.5 * variance).exp()
else:
raise ValueError(
F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
" for the UnCLIPScheduler." )
_lowerCamelCase : Dict = variance * variance_noise
_lowerCamelCase : List[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase )
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor):
'''simple docstring'''
_lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype )
_lowerCamelCase : Any = timesteps.to(original_samples.device )
_lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5
_lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
_lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 )
_lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
_lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
_lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
_lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
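# Self-contained sketch (an addition) of the forward-noising broadcast pattern
# used in add_noise above: per-timestep scalars are gathered, then unsqueezed
# until they broadcast against (batch, channels, height, width) samples.
def _add_noise_broadcast_demo():
    alphas_cumprod = torch.linspace(0.99, 0.01, 1_000)  # stand-in schedule
    samples = torch.randn(2, 3, 8, 8)
    noise = torch.randn_like(samples)
    timesteps = torch.tensor([10, 500])
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5      # shape (2,)
    sqrt_one_minus = (1 - alphas_cumprod[timesteps]) ** 0.5
    while sqrt_alpha_prod.dim() < samples.dim():            # -> (2, 1, 1, 1)
        sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
        sqrt_one_minus = sqrt_one_minus.unsqueeze(-1)
    noisy = sqrt_alpha_prod * samples + sqrt_one_minus * noise
    assert noisy.shape == samples.shape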
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = UnCLIPImageVariationPipeline
lowerCAmelCase__ = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
lowerCAmelCase__ = IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
lowerCAmelCase__ = False
@property
    def text_embedder_hidden_size(self):
'''simple docstring'''
return 32
@property
    def time_input_dim(self):
'''simple docstring'''
return 32
@property
    def block_out_channels_a(self):
'''simple docstring'''
return self.time_input_dim
@property
    def time_embed_dim(self):
'''simple docstring'''
return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
'''simple docstring'''
return 100
@property
    def dummy_tokenizer(self):
'''simple docstring'''
_lowerCamelCase : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
    def dummy_text_encoder(self):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : List[str] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
return CLIPTextModelWithProjection(_SCREAMING_SNAKE_CASE )
@property
    def dummy_image_encoder(self):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : str = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,num_hidden_layers=5 ,num_attention_heads=4 ,image_size=32 ,intermediate_size=37 ,patch_size=1 ,)
return CLIPVisionModelWithProjection(_SCREAMING_SNAKE_CASE )
@property
    def dummy_text_proj(self):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : Any = {
"""clip_embeddings_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""cross_attention_dim""": self.cross_attention_dim,
}
_lowerCamelCase : List[str] = UnCLIPTextProjModel(**_SCREAMING_SNAKE_CASE )
return model
@property
    def dummy_decoder(self):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = {
"""sample_size""": 32,
# RGB in channels
"""in_channels""": 3,
            # out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 6,
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": """identity""",
}
_lowerCamelCase : List[Any] = UNetaDConditionModel(**_SCREAMING_SNAKE_CASE )
return model
@property
    def dummy_super_res_kwargs(self):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
    def dummy_super_res_first(self):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : Dict = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
    def dummy_super_res_last(self):
'''simple docstring'''
torch.manual_seed(1 )
_lowerCamelCase : List[Any] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
    def get_dummy_components(self):
'''simple docstring'''
_lowerCamelCase : Any = self.dummy_decoder
_lowerCamelCase : Tuple = self.dummy_text_proj
_lowerCamelCase : Optional[Any] = self.dummy_text_encoder
_lowerCamelCase : Optional[Any] = self.dummy_tokenizer
_lowerCamelCase : Optional[Any] = self.dummy_super_res_first
_lowerCamelCase : Tuple = self.dummy_super_res_last
_lowerCamelCase : Tuple = UnCLIPScheduler(
variance_type="learned_range" ,prediction_type="epsilon" ,num_train_timesteps=1_000 ,)
_lowerCamelCase : int = UnCLIPScheduler(
variance_type="fixed_small_log" ,prediction_type="epsilon" ,num_train_timesteps=1_000 ,)
_lowerCamelCase : List[Any] = CLIPImageProcessor(crop_size=32 ,size=32 )
_lowerCamelCase : int = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
'''simple docstring'''
_lowerCamelCase : List[str] = floats_tensor((1, 3, 32, 32) ,rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith("mps" ):
_lowerCamelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
_lowerCamelCase : Optional[int] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
if pil_image:
_lowerCamelCase : Union[str, Any] = input_image * 0.5 + 0.5
_lowerCamelCase : List[str] = input_image.clamp(0 ,1 )
_lowerCamelCase : Optional[Any] = input_image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
_lowerCamelCase : Union[str, Any] = DiffusionPipeline.numpy_to_pil(_SCREAMING_SNAKE_CASE )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
    def test_unclip_image_variation_input_tensor(self):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = """cpu"""
_lowerCamelCase : List[str] = self.get_dummy_components()
_lowerCamelCase : str = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Tuple = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ,pil_image=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Tuple = output.images
_lowerCamelCase : Optional[int] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ,pil_image=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Any = pipe(
**_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE ,)[0]
_lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
_lowerCamelCase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCamelCase : int = np.array(
[
0.99_97,
0.00_02,
0.99_97,
0.99_97,
0.99_69,
0.00_23,
0.99_97,
0.99_69,
0.99_70,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_unclip_image_variation_input_image(self):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = """cpu"""
_lowerCamelCase : List[Any] = self.get_dummy_components()
_lowerCamelCase : List[str] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
_lowerCamelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : List[str] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ,pil_image=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : List[Any] = pipe(**_SCREAMING_SNAKE_CASE )
_lowerCamelCase : List[str] = output.images
_lowerCamelCase : Any = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ,pil_image=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Tuple = pipe(
**_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE ,)[0]
_lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
_lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCamelCase : Optional[int] = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_unclip_image_variation_input_list_images(self):
'''simple docstring'''
_lowerCamelCase : List[str] = """cpu"""
_lowerCamelCase : str = self.get_dummy_components()
_lowerCamelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Optional[int] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Any = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ,pil_image=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : str = [
pipeline_inputs["""image"""],
pipeline_inputs["""image"""],
]
_lowerCamelCase : str = pipe(**_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Union[str, Any] = output.images
_lowerCamelCase : str = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ,pil_image=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : Dict = [
tuple_pipeline_inputs["""image"""],
tuple_pipeline_inputs["""image"""],
]
_lowerCamelCase : Dict = pipe(
**_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE ,)[0]
_lowerCamelCase : str = image[0, -3:, -3:, -1]
_lowerCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
_lowerCamelCase : Any = np.array(
[
0.99_97,
0.99_89,
0.00_08,
0.00_21,
0.99_60,
0.00_18,
0.00_14,
0.00_02,
0.99_33,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_unclip_passed_image_embed(self):
'''simple docstring'''
_lowerCamelCase : Dict = torch.device("cpu" )
        class DummyScheduler:
            init_noise_sigma = 1
_lowerCamelCase : List[Any] = self.get_dummy_components()
_lowerCamelCase : str = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
_lowerCamelCase : int = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
_lowerCamelCase : Tuple = pipe.decoder.dtype
_lowerCamelCase : Any = 1
_lowerCamelCase : List[str] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
_lowerCamelCase : Dict = pipe.prepare_latents(
_SCREAMING_SNAKE_CASE ,dtype=_SCREAMING_SNAKE_CASE ,device=_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,latents=_SCREAMING_SNAKE_CASE ,scheduler=DummyScheduler() )
_lowerCamelCase : List[Any] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
_lowerCamelCase : Optional[int] = pipe.prepare_latents(
_SCREAMING_SNAKE_CASE ,dtype=_SCREAMING_SNAKE_CASE ,device=_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,latents=_SCREAMING_SNAKE_CASE ,scheduler=DummyScheduler() )
_lowerCamelCase : str = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ,pil_image=_SCREAMING_SNAKE_CASE )
        img_out_1 = pipe(
**_SCREAMING_SNAKE_CASE ,decoder_latents=_SCREAMING_SNAKE_CASE ,super_res_latents=_SCREAMING_SNAKE_CASE ).images
_lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ,pil_image=_SCREAMING_SNAKE_CASE )
# Don't pass image, instead pass embedding
_lowerCamelCase : Dict = pipeline_inputs.pop("image" )
_lowerCamelCase : int = pipe.image_encoder(_SCREAMING_SNAKE_CASE ).image_embeds
        img_out_2 = pipe(
**_SCREAMING_SNAKE_CASE ,decoder_latents=_SCREAMING_SNAKE_CASE ,super_res_latents=_SCREAMING_SNAKE_CASE ,image_embeddings=_SCREAMING_SNAKE_CASE ,).images
        # make sure passing image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
@skip_mps
    def test_attention_slicing_forward_pass(self):
'''simple docstring'''
_lowerCamelCase : Tuple = torch_device == """cpu"""
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
_lowerCamelCase : str = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=_SCREAMING_SNAKE_CASE ,expected_max_diff=_SCREAMING_SNAKE_CASE )
@skip_mps
    def test_inference_batch_single_identical(self):
'''simple docstring'''
_lowerCamelCase : int = torch_device == """cpu"""
_lowerCamelCase : int = True
_lowerCamelCase : Optional[Any] = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
self._test_inference_batch_single_identical(
test_max_difference=_SCREAMING_SNAKE_CASE ,relax_max_difference=_SCREAMING_SNAKE_CASE ,additional_params_copy_to_batched_inputs=_SCREAMING_SNAKE_CASE ,)
    def test_inference_batch_consistent(self):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
_lowerCamelCase : Optional[int] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=_SCREAMING_SNAKE_CASE ,additional_params_copy_to_batched_inputs=_SCREAMING_SNAKE_CASE ,)
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=_SCREAMING_SNAKE_CASE )
@skip_mps
def _lowercase ( self: List[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowercase ( self: int ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_unclip_image_variation_karlo(self):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
_lowerCamelCase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )
_lowerCamelCase : Optional[Any] = UnCLIPImageVariationPipeline.from_pretrained(
"kakaobrain/karlo-v1-alpha-image-variations" ,torch_dtype=torch.floataa )
_lowerCamelCase : Dict = pipeline.to(_SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_lowerCamelCase : str = torch.Generator(device="cpu" ).manual_seed(0 )
_lowerCamelCase : List[Any] = pipeline(
_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ,output_type="np" ,)
_lowerCamelCase : str = output.images[0]
assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,15 )
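# Minimal numpy sketch (an addition) of the kind of check
# assert_mean_pixel_difference performs above: two images count as close when
# the mean absolute pixel difference stays under a threshold (15 above).
def _mean_pixel_difference(image_a, image_b):
    a = np.asarray(image_a, dtype=np.float32)
    b = np.asarray(image_b, dtype=np.float32)
    return float(np.abs(a - b).mean())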
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCAmelCase : str = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
'''simple docstring'''
for attribute in key.split("." ):
_lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
_lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
_lowerCamelCase : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
_lowerCamelCase : Tuple = value
elif weight_type == "weight_g":
_lowerCamelCase : List[str] = value
elif weight_type == "weight_v":
_lowerCamelCase : List[Any] = value
elif weight_type == "bias":
_lowerCamelCase : str = value
elif weight_type == "running_mean":
_lowerCamelCase : Optional[int] = value
elif weight_type == "running_var":
_lowerCamelCase : Optional[Any] = value
elif weight_type == "num_batches_tracked":
_lowerCamelCase : int = value
elif weight_type == "inv_freq":
_lowerCamelCase : List[str] = value
else:
_lowerCamelCase : Optional[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model, is_headless):
'''simple docstring'''
_lowerCamelCase : Dict = []
_lowerCamelCase : Optional[Any] = fairseq_model.state_dict()
_lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase : Dict = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
_lowerCamelCase : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_lowerCamelCase : int = True
if "*" in mapped_key:
_lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2]
_lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase )
if "pos_bias_u" in name:
_lowerCamelCase : int = None
elif "pos_bias_v" in name:
_lowerCamelCase : Any = None
elif "weight_g" in name:
_lowerCamelCase : Any = "weight_g"
elif "weight_v" in name:
_lowerCamelCase : Any = "weight_v"
elif "bias" in name:
_lowerCamelCase : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase : Dict = "weight"
elif "running_mean" in name:
_lowerCamelCase : str = "running_mean"
elif "inv_freq" in name:
_lowerCamelCase : List[Any] = "inv_freq"
elif "running_var" in name:
_lowerCamelCase : Tuple = "running_var"
elif "num_batches_tracked" in name:
_lowerCamelCase : str = "num_batches_tracked"
else:
_lowerCamelCase : Dict = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
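# Small illustration (an addition) of the "*" wildcard convention in MAPPING
# above: the layer index parsed from the fairseq name replaces "*" to form the
# HF key. The example name below is hypothetical.
def _expand_wildcard_key(mapped_key, fairseq_name, matched_key):
    layer_index = fairseq_name.split(matched_key)[0].split(".")[-2]
    return mapped_key.replace("*", layer_index)


# _expand_wildcard_key(
#     "encoder.layers.*.self_attn.linear_q",
#     "w2v_model.encoder.layers.3.self_attn.linear_q.weight",
#     "self_attn.linear_q",
# ) -> "encoder.layers.3.self_attn.linear_q"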
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
'''simple docstring'''
_lowerCamelCase : int = full_name.split("conv_layers." )[-1]
_lowerCamelCase : List[Any] = name.split("." )
_lowerCamelCase : Union[str, Any] = int(items[0] )
_lowerCamelCase : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
_lowerCamelCase : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
_lowerCamelCase : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
_lowerCamelCase : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
_lowerCamelCase : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCamelCase )
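# Illustration (an addition) of how conv-layer names are parsed above: the
# suffix after "conv_layers." yields (layer_id, type_id); type_id 0 is the conv
# weight/bias and type_id 2 the layer norm. The example name is hypothetical.
def _parse_conv_name(full_name):
    items = full_name.split("conv_layers.")[-1].split(".")  # e.g. ["0", "2", "weight"]
    return int(items[0]), int(items[1])


# _parse_conv_name("feature_extractor.conv_layers.0.2.weight") -> (0, 2)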
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
'''simple docstring'''
if config_path is not None:
_lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" )
else:
_lowerCamelCase : Dict = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
_lowerCamelCase : List[Any] = "rotary"
if is_finetuned:
if dict_path:
_lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase : Optional[int] = target_dict.pad_index
_lowerCamelCase : Dict = target_dict.bos_index
_lowerCamelCase : Optional[Any] = target_dict.eos_index
_lowerCamelCase : str = len(target_dict.symbols )
_lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
_lowerCamelCase : Tuple = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase : List[str] = 0
_lowerCamelCase : List[Any] = 1
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , )
_lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False
_lowerCamelCase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
_lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
_lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase )
else:
_lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase )
if is_finetuned:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" )
_lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
_lowerCamelCase : Dict = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_lowerCAmelCase : str = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
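# Example invocation sketch for this script (all paths below are hypothetical
# placeholders):
#
#   python <this_script>.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-converted
#
# Pass --not_finetuned to convert a pretraining-only checkpoint, and
# --config_path to start from an existing hf config.json.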
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
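# Hedged sketch (an addition) of exercising the deprecation path above; it
# assumes the upstream `transformers` package, which exposes the same
# DonutFeatureExtractor wrapper:
def _donut_deprecation_demo():
    import warnings as _warnings

    from transformers import DonutFeatureExtractor as UpstreamDonutFeatureExtractor

    with _warnings.catch_warnings(record=True) as caught:
        _warnings.simplefilter("always")
        UpstreamDonutFeatureExtractor()
        assert any("DonutImageProcessor" in str(w.message) for w in caught)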
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    '''simple docstring'''
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    '''simple docstring'''
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    '''simple docstring'''
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
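    # quick usage checks (an addition for illustration)
    assert manhattan_distance([1, 1], [2, 2]) == 2.0
    assert manhattan_distance_one_liner([1.5, 2], [3, 4]) == 3.5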
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
'''simple docstring'''
_lowerCamelCase : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case__ ,"embed_dim" ) )
self.parent.assertTrue(hasattr(snake_case__ ,"num_heads" ) )
class CvtModelTester:
def __init__( self: Tuple ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any]=13 ,__lowerCAmelCase: int=64 ,__lowerCAmelCase: List[Any]=3 ,__lowerCAmelCase: Dict=[16, 48, 96] ,__lowerCAmelCase: List[str]=[1, 3, 6] ,__lowerCAmelCase: int=[1, 2, 10] ,__lowerCAmelCase: Union[str, Any]=[7, 3, 3] ,__lowerCAmelCase: Optional[Any]=[4, 2, 2] ,__lowerCAmelCase: List[str]=[2, 1, 1] ,__lowerCAmelCase: Optional[Any]=[2, 2, 2] ,__lowerCAmelCase: Dict=[False, False, True] ,__lowerCAmelCase: Dict=[0.0, 0.0, 0.0] ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: List[str]=1e-12 ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: Any=True ,__lowerCAmelCase: List[Any]=2 ,):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = parent
_lowerCamelCase : int = batch_size
_lowerCamelCase : int = image_size
_lowerCamelCase : Union[str, Any] = patch_sizes
_lowerCamelCase : int = patch_stride
_lowerCamelCase : Optional[Any] = patch_padding
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : Optional[int] = use_labels
_lowerCamelCase : List[str] = num_labels
_lowerCamelCase : int = num_channels
_lowerCamelCase : List[str] = embed_dim
_lowerCamelCase : str = num_heads
_lowerCamelCase : Union[str, Any] = stride_kv
_lowerCamelCase : int = depth
_lowerCamelCase : Dict = cls_token
_lowerCamelCase : int = attention_drop_rate
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Union[str, Any] = layer_norm_eps
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size ,num_labels=self.num_labels ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,num_heads=self.num_heads ,patch_sizes=self.patch_sizes ,patch_padding=self.patch_padding ,patch_stride=self.patch_stride ,stride_kv=self.stride_kv ,depth=self.depth ,cls_token=self.cls_token ,attention_drop_rate=self.attention_drop_rate ,initializer_range=self.initializer_range ,)
def _lowercase ( self: List[str] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
_lowerCamelCase : int = CvtModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCamelCase : Optional[Any] = model(snake_case__ )
_lowerCamelCase : Dict = (self.image_size, self.image_size)
_lowerCamelCase : Optional[int] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_lowerCamelCase : Any = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_lowerCamelCase : str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.embed_dim[-1], height, width) )
def _lowercase ( self: str ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.num_labels
_lowerCamelCase : str = CvtForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCamelCase : Dict = model(snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Dict = self.prepare_config_and_inputs()
_lowerCamelCase : Optional[int] = config_and_inputs
_lowerCamelCase : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class CvtModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": CvtModel, """image-classification""": CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = CvtModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=CvtConfig ,has_text_modality=False ,hidden_size=37 )
    def test_config( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
'''simple docstring'''
return
@unittest.skip(reason="Cvt does not output attentions" )
    def test_attention_outputs( self ):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
    def test_model_common_attributes( self ):
'''simple docstring'''
pass
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict ,config ,model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth )
            self.assertEqual(len(hidden_states ) ,expected_num_layers )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) ,[
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] ,)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict ,config ,model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict ,config ,model_class )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1e-4 ) )
| 362 |
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size( features: Features ) -> Optional[int]:
    '''simple docstring'''
    batch_size = np.inf
    def set_batch_size(feature: FeatureType ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
    _visit(features , set_batch_size )
    return None if batch_size is np.inf else batch_size
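# Descriptive note: the visitor above caps the Parquet writer's row-group size
# for media-heavy features (images, audio, raw binary) so a single row group
# stays memory-friendly; plain features keep the library default (None).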
class ParquetDatasetReader( AbstractDatasetReader ):
    def __init__( self ,path_or_paths: NestedDataStructureLike[PathLike] ,split: Optional[NamedSplit] = None ,features: Optional[Features] = None ,cache_dir: str = None ,keep_in_memory: bool = False ,streaming: bool = False ,num_proc: Optional[int] = None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(
            path_or_paths ,split=split ,features=features ,cache_dir=cache_dir ,keep_in_memory=keep_in_memory ,streaming=streaming ,num_proc=num_proc ,**kwargs ,)
        path_or_paths = path_or_paths if isinstance(path_or_paths ,dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir ,data_files=path_or_paths ,features=features ,hash=hash ,**kwargs ,)
    def read( self ):
        '''simple docstring'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config ,download_mode=download_mode ,verification_mode=verification_mode ,base_path=base_path ,num_proc=self.num_proc ,)
            dataset = self.builder.as_dataset(
                split=self.split ,verification_mode=verification_mode ,in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter:
    def __init__( self ,dataset: Dataset ,path_or_buf: Union[PathLike, BinaryIO] ,batch_size: Optional[int] = None ,**parquet_writer_kwargs ,):
        '''simple docstring'''
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write( self ):
        '''simple docstring'''
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
            with open(self.path_or_buf ,"wb+" ) as buffer:
                written = self._write(file_obj=buffer ,batch_size=batch_size ,**self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf ,batch_size=batch_size ,**self.parquet_writer_kwargs )
        return written
    def _write( self ,file_obj: BinaryIO ,batch_size: int ,**parquet_writer_kwargs ):
        '''simple docstring'''
        written = 0
        parquet_writer_kwargs.pop("path_or_buf" ,None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj ,schema=schema ,**parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 ,len(self.dataset ) ,batch_size ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating parquet from Arrow format" ,):
            batch = query_table(
                table=self.dataset._data ,key=slice(offset ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,)
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
        return written | 340 | 0 |
"""simple docstring"""
def solution( n: int = 100 ) -> int:
    '''simple docstring'''
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
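# Worked example: for n = 10, square_of_sum = 3025 and sum_of_squares = 385,
# so the function returns 2640 (Project Euler problem 6's sample case).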
if __name__ == "__main__":
print(f'''{solution() = }''') | 363 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example ) -> dict:
    '''simple docstring'''
    output = {}
    output["input_ids"] = tokenizer(example["content"] , truncation=False )["input_ids"]
    output["ratio_char_token"] = len(example["content"] ) / len(output["input_ids"] )
    return output
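# Note: the output key names above ("input_ids", "ratio_char_token") are an
# assumption reconstructed from the surrounding script; the ratio records the
# average number of characters per token, a rough compression measure.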
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''') | 340 | 0 |
"""simple docstring"""
from __future__ import annotations
_lowerCAmelCase : List[Any] = list[tuple[int, int]]
_lowerCAmelCase : Tuple = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_lowerCAmelCase : List[str] = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class Node:
    def __init__( self ,pos_x: int ,pos_y: int ,goal_x: int ,goal_y: int ,g_cost: float ,parent ,):
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()
    def calculate_heuristic( self ):
        '''simple docstring'''
        dx = abs(self.pos_x - self.goal_x )
        dy = abs(self.pos_y - self.goal_y )
        return dx + dy
    def __lt__( self ,other ):
        '''simple docstring'''
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__( self ,start ,goal ):
        '''simple docstring'''
        self.start = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,None )
        self.target = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,99_999 ,None )
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False
    def search( self ):
        '''simple docstring'''
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors( self ,parent ):
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x ,pos_y ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,None ,) )
        return successors
    def retrace_path( self ,node ):
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
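# Note: unlike A*, greedy best-first search orders the open list purely by the
# heuristic (f_cost is the Manhattan distance to the goal); g_cost is tracked
# only to pick the better of two paths reaching the same node, and is never
# added to f_cost, so the returned path is not guaranteed to be optimal.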
if __name__ == "__main__":
_lowerCAmelCase : str = (0, 0)
_lowerCAmelCase : Tuple = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
_lowerCAmelCase : List[str] = GreedyBestFirst(init, goal)
_lowerCAmelCase : Optional[int] = greedy_bf.search()
if path:
for pos_x, pos_y in path:
_lowerCAmelCase : Optional[Any] = 2
for elem in grid:
print(elem)
| 364 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 340 | 0 |
"""simple docstring"""
import numpy as np
def power_iteration( input_matrix: np.ndarray , vector: np.ndarray , error_tol: float = 1e-12 , max_iterations: int = 100 , ) -> tuple[float, np.ndarray]:
    '''simple docstring'''
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_ )
    return lambda_, vector
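# Minimal usage sketch (hypothetical 2x2 example, assuming power_iteration above):
# power_iteration(np.array([[2.0, 0.0], [0.0, 1.0]]), np.array([1.0, 0.5]))
# converges to roughly (2.0, [1.0, 0.0]), the dominant eigenpair.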
def test_power_iteration( ) -> None:
    '''simple docstring'''
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    real_vector = np.array([41, 4, 20] )
    complex_input_matrix = real_input_matrix.astype(np.complex128 )
    imag_matrix = np.triu(1j * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20] ).astype(np.complex128 )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix , vector )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix )
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration() | 365 |
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
_lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
class MaskedBertConfig( PretrainedConfig ):
    model_type = 'masked_bert'
    def __init__( self ,vocab_size=30_522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,pad_token_id=0 ,pruning_method="topK" ,mask_init="constant" ,mask_scale=0.0 ,**kwargs ,):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale | 340 | 0 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key( old_name , num_meta4D_last_stage ) -> str:
    '''simple docstring'''
    new_name = old_name
    if "patch_embed" in old_name:
        _, layer, param = old_name.split("." )
        if layer == "0":
            new_name = old_name.replace("0" , "convolution1" )
        elif layer == "1":
            new_name = old_name.replace("1" , "batchnorm_before" )
        elif layer == "3":
            new_name = old_name.replace("3" , "convolution2" )
        else:
            new_name = old_name.replace("4" , "batchnorm_after" )
    if "network" in old_name and re.search(R"\d\.\d" , old_name ):
        two_digit_num = R"\b\d{2}\b"
        if bool(re.search(two_digit_num , old_name ) ):
            match = re.search(R"\d\.\d\d." , old_name ).group()
        else:
            match = re.search(R"\d\.\d." , old_name ).group()
        if int(match[0] ) < 6:
            trimmed_name = old_name.replace(match , "" )
            trimmed_name = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match , "" )
            if int(match[2] ) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
            else:
                layer_index = str(int(match[2] ) - num_meta4D_last_stage )
                trimmed_name = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1" , "layernorm1" )
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2" , "layernorm2" )
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1" , "linear_in" )
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2" , "linear_out" )
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(R".\d." , old_name ):
        new_name = old_name.replace("network" , "intermediate_stages" )
    if "fc" in new_name:
        new_name = new_name.replace("fc" , "convolution" )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1" , "batchnorm_before" )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2" , "batchnorm_after" )
    if "proj" in new_name:
        new_name = new_name.replace("proj" , "projection" )
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head" , "distillation_classifier" )
    elif "head" in new_name:
        new_name = new_name.replace("head" , "classifier" )
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm" , "layernorm" )
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
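# In short: `patch_embed` layers become convolution/batchnorm pairs, numbered
# `network` blocks are split between `intermediate_stages` and the meta-3D /
# meta-4D blocks of `last_stage`, and head/proj/norm names are mapped onto
# their Hugging Face equivalents under the `efficientformer.` prefix.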
def convert_torch_checkpoint( checkpoint , num_meta4D_last_stage ) -> Any:
    '''simple docstring'''
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key , num_meta4D_last_stage )] = val
    return checkpoint
def prepare_img( ) -> Tuple:
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
def convert_efficientformer_checkpoint( checkpoint_path , efficientformer_config_file , pytorch_dump_path , push_to_hub ) -> None:
    '''simple docstring'''
    orig_state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
    model = EfficientFormerForImageClassificationWithTeacher(config )
    model_name = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict , num_meta4D_last_stage )
    model.load_state_dict(new_state_dict )
    model.eval()
    pillow_resamplings = {
        """bilinear""": PILImageResampling.BILINEAR,
        """bicubic""": PILImageResampling.BICUBIC,
        """nearest""": PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
    pixel_values = processor(images=image , return_tensors="pt" ).pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size , interpolation=pillow_resamplings["bicubic"] ),
            CenterCrop(crop_size ),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN , IMAGENET_DEFAULT_STD ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    assert torch.allclose(original_pixel_values , pixel_values )
    outputs = model(pixel_values )
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
    # Save Checkpoints
    Path(pytorch_dump_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_path )
    print(F"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
    processor.save_pretrained(pytorch_dump_path )
    print(F"""Processor successfully saved at {pytorch_dump_path}""" )
    if push_to_hub:
        print("Pushing model to the hub..." )
        model.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=True , )
        processor.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
) | 366 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
os.environ['''XLA_PYTHON_CLIENT_MEM_FRACTION'''] = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor( shape , vocab_size , rng=None ) -> List[Any]:
    '''simple docstring'''
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    output = np.array(values , dtype=jnp.int32 ).reshape(shape )
    return output
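# `ids_tensor` mirrors the PyTorch test helper of the same name: it fills a
# tensor of the requested shape with token ids drawn uniformly from
# [0, vocab_size), using an optional seeded `random.Random` for repeatability.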
def random_attention_mask( shape , rng=None ) -> Union[str, Any]:
    '''simple docstring'''
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config( self ):
        '''simple docstring'''
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids )
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = self._get_input_ids_and_config()
_lowerCamelCase : List[Any] = False
_lowerCamelCase : Dict = max_length
_lowerCamelCase : Tuple = 0
for model_class in self.all_generative_model_classes:
_lowerCamelCase : str = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
_lowerCamelCase : Any = getattr(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Dict = pt_model_class(__lowerCAmelCase ).eval()
_lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCAmelCase ,flax_model.params )
_lowerCamelCase : int = flax_model.generate(__lowerCAmelCase ).sequences
_lowerCamelCase : Optional[int] = pt_model.generate(torch.tensor(__lowerCAmelCase ,dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_lowerCamelCase : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = self._get_input_ids_and_config()
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase )
_lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Dict = jit(model.generate )
_lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config()
_lowerCamelCase : List[Any] = True
_lowerCamelCase : Optional[int] = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Dict = jit(model.generate )
_lowerCamelCase : int = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config()
_lowerCamelCase : int = False
_lowerCamelCase : Optional[Any] = max_length
_lowerCamelCase : Dict = 2
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
_lowerCamelCase : Dict = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Tuple = jit(model.generate )
_lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = self._get_input_ids_and_config()
_lowerCamelCase : Tuple = False
_lowerCamelCase : Union[str, Any] = max_length
_lowerCamelCase : List[str] = 2
_lowerCamelCase : Optional[int] = 2
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : str = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config()
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = max_length
_lowerCamelCase : Optional[Any] = 0.8
_lowerCamelCase : Union[str, Any] = 10
_lowerCamelCase : List[str] = 0.3
_lowerCamelCase : Tuple = 1
_lowerCamelCase : Any = 8
_lowerCamelCase : str = 9
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase )
_lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : int = jit(model.generate )
_lowerCamelCase : Optional[int] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = self._get_input_ids_and_config()
_lowerCamelCase : List[str] = max_length
_lowerCamelCase : Tuple = 1
_lowerCamelCase : Any = 8
_lowerCamelCase : Dict = 9
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Any = model_class(__lowerCAmelCase )
_lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Any = jit(model.generate )
_lowerCamelCase : Any = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config()
_lowerCamelCase : Dict = max_length
_lowerCamelCase : List[Any] = 2
_lowerCamelCase : Tuple = 1
_lowerCamelCase : List[str] = 8
_lowerCamelCase : List[Any] = 9
for model_class in self.all_generative_model_classes:
_lowerCamelCase : int = model_class(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Tuple = jit(model.generate )
_lowerCamelCase : Optional[Any] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCamelCase : Tuple = attention_mask.at[(0, 0)].set(0 )
_lowerCamelCase : Dict = False
_lowerCamelCase : Any = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Any = jit(model.generate )
_lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCamelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 )
_lowerCamelCase : List[str] = True
_lowerCamelCase : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Any = jit(model.generate )
_lowerCamelCase : List[Any] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCamelCase : List[str] = attention_mask.at[(0, 0)].set(0 )
_lowerCamelCase : int = 2
_lowerCamelCase : int = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : int = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Dict = jit(model.generate )
_lowerCamelCase : Dict = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests( unittest.TestCase ):
    def test_validate_generation_inputs( self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str ,return_tensors="np" ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError ,"do_samples" ):
            model.generate(input_ids ,do_samples=True )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError ,"foo" ):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids ,**fake_model_kwargs ) | 340 | 0 |
"""simple docstring"""
from math import sqrt
def sum_of_divisors( n: int ) -> int:
    '''simple docstring'''
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
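# Worked example: sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220,
# the classic amicable pair counted by `solution` below.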
def solution( n: int = 10000 ) -> int:
    '''simple docstring'''
    total = sum(
        i
        for i in range(1 , n )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 367 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config( PretrainedConfig ):
    model_type = 'mobilenet_v1'
    def __init__( self ,num_channels=3 ,image_size=224 ,depth_multiplier=1.0 ,min_depth=8 ,hidden_act="relu6" ,tf_padding=True ,classifier_dropout_prob=0.999 ,initializer_range=0.02 ,layer_norm_eps=0.001 ,**kwargs ,):
        '''simple docstring'''
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict([("pixel_values", {0: "batch"})] )
    @property
    def outputs( self ):
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1e-4 | 340 | 0 |
"""simple docstring"""
def lowerCamelCase_( column_title: str ) -> int:
    '''simple docstring'''
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
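# Worked example: "AB" -> 28, since (ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1
# evaluates to 2 + 26, mirroring Excel's column numbering.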
if __name__ == "__main__":
from doctest import testmod
testmod() | 368 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowerCamelCase_( module ) -> None:
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False
def lowerCamelCase_( ) -> str:
    '''simple docstring'''
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
def lowerCamelCase_( img ) -> None:
    '''simple docstring'''
    fig = plt.imshow(img )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def lowerCamelCase_( ) -> str:
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S" )
return timestamp | 340 | 0 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark( key ) -> Union[str, Any]:
    '''simple docstring'''
    def decorator(func ):
        handle = getattr(func , "handle_key" , [] )
        handle += [key]
        setattr(func , "handle_key" , handle )
        return func
    return decorator
def mark_multiple( *keys ) -> List[str]:
    '''simple docstring'''
    def decorator(func ):
        handle = getattr(func , "handle_key" , [] )
        handle += keys
        setattr(func , "handle_key" , handle )
        return func
    return decorator
class KeyHandler( type ):
    def __new__( cls ,name ,bases ,attrs ):
        '''simple docstring'''
        new_cls = super().__new__(cls ,name ,bases ,attrs )
        if not hasattr(new_cls ,"key_handler" ):
            setattr(new_cls ,"key_handler" ,{} )
        setattr(new_cls ,"handle_input" ,KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value ,"handle_key" ,[] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        '''simple docstring'''
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
            handler = cls.key_handler.get(char )
            if handler:
                cls.current_selection = char
                return handler(cls )
            else:
                return None
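# The helper below simply rebuilds a class through the KeyHandler metaclass, so
# that any methods marked with the key decorators above are collected into its
# `key_handler` table and dispatched by `handle_input`.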
def lowerCamelCase_( cls ) -> Dict:
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() ) | 369 |
"""simple docstring"""
def alternative_string_arrange( first_str: str , second_str: str ) -> str:
    '''simple docstring'''
    first_str_length : int = len(first_str )
    second_str_length : int = len(second_str )
    output_string_length : int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list : list = []
    for char_count in range(output_string_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''') | 340 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open( *args ,**kwargs ):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline( self ,model ,tokenizer ,processor ):
        '''simple docstring'''
        object_detector = ObjectDetectionPipeline(model=model ,image_processor=processor )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test( self ,object_detector ,examples ):
        '''simple docstring'''
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" ,threshold=0.0 )
        self.assertGreater(len(outputs ) ,0 )
        for detected_object in outputs:
            self.assertEqual(
                detected_object ,{
                    "score": ANY(float ),
                    "label": ANY(str ),
                    "box": {"xmin": ANY(int ), "ymin": ANY(int ), "xmax": ANY(int ), "ymax": ANY(int )},
                } ,)
        import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" ,"image" ,split="test" )
        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch ,threshold=0.0 )
        self.assertEqual(len(batch ) ,len(batch_outputs ) )
        for outputs in batch_outputs:
            self.assertGreater(len(outputs ) ,0 )
            for detected_object in outputs:
                self.assertEqual(
                    detected_object ,{
                        "score": ANY(float ),
                        "label": ANY(str ),
                        "box": {"xmin": ANY(int ), "ymin": ANY(int ), "xmax": ANY(int ), "ymax": ANY(int )},
                    } ,)
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def _lowercase ( self: int ):
'''simple docstring'''
pass
@require_torch
    def test_small_model_pt( self ):
        '''simple docstring'''
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"
        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model ,feature_extractor=feature_extractor )
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ,threshold=0.0 )
        self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ] ,)
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] ,threshold=0.0 ,)
self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] ,)
@require_torch
@slow
    def test_large_model_pt( self ):
        '''simple docstring'''
        model_id = "facebook/detr-resnet-50"
        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model ,feature_extractor=feature_extractor )
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
            nested_simplify(outputs ,decimals=4 ) ,[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] ,)
_lowerCamelCase : Optional[Any] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(_lowercase ,decimals=4 ) ,[
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] ,)

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        # A high threshold keeps only the two most confident detections.
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        # LayoutLMv3 token classifier run through the object-detection pipeline;
        # pytesseract is required to OCR the word boxes out of the document image.
        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        ) | 370 |
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
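
# Each entry test_graph[u][v] is the capacity of the directed edge u -> v.
# Ford-Fulkerson: repeatedly find an augmenting path by BFS (Edmonds-Karp) and
# push flow along it; when no path remains, the original edges that ended up
# saturated form a minimum cut (max-flow min-cut theorem).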

def bfs(graph: list[list[int]], s: int, t: int, parent: list[int]) -> bool:
    """Return True if sink ``t`` is reachable from source ``s`` in the residual
    graph, recording the path found in ``parent``."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]

def mincut(graph: list[list[int]], source: int, sink: int) -> list[tuple[int, int]]:
    """Return the edges crossing the minimum cut between ``source`` and ``sink``."""
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record the original capacities (copy of each row).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities: subtract along the path, add on reverse edges.
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # An original edge whose residual capacity dropped to zero is saturated
    # and therefore crosses the minimum cut.
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5)) | 340 | 0 |
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
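
# A residue's heavy atoms can be stored either in the sparse 37-slot layout
# shared by all residue types ("atom37") or in a dense per-residue 14-slot
# layout ("atom14"). The helpers below build the index tables and existence
# masks used to gather coordinates between the two layouts for a sequence.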

def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14)
    restype_atom37_to_atom14.append([0] * 37)
    restype_atom14_mask.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein

def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    """NumPy front-end: tensorize the inputs, run ``make_atom14_masks``, convert back."""
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out | 371 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}
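
# The archive map above points the short model names at their hosted
# config.json files; the config classes below only record the hyperparameters
# and ONNX export metadata needed to rebuild the model.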

class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        ) | 340 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}

class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias | 350 |
"""simple docstring"""
from collections import defaultdict
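
# HackerRank "Even Tree": given a tree with an even number of vertices, remove
# as many edges as possible so that every remaining component has even size.
# An edge can be cut exactly when the subtree below it has an even number of
# vertices, so the answer is the number of even-sized subtrees minus the root.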

def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``, recording even subtrees."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Run the subtree count from the root (vertex 1)."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9  # number of vertices and edges
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    # The root's (even) subtree is the whole tree and cannot be cut off,
    # so it is excluded from the count.
    print(len(cuts) - 1) | 340 | 0 |