Column schema (name : type, observed range):

    code                     : string, lengths 86 to 54.5k
    code_codestyle           : int64, 0 to 371
    style_context            : string, lengths 87 to 49.2k
    style_context_codestyle  : int64, 0 to 349
    label                    : int64, 0 to 1
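Below is a minimal sketch of how records with this schema might be consumed via the `datasets` library. The repository ID is a placeholder, and reading `label` as a style-match flag between `code` and `style_context` is an assumption; the dump itself does not state it.

    from datasets import load_dataset

    # Placeholder repo ID -- substitute the actual Hub dataset behind this dump.
    ds = load_dataset("your-org/code-style-pairs", split="train")

    row = ds[0]
    print(len(row["code"]), row["code_codestyle"])                    # code snippet and its style ID
    print(len(row["style_context"]), row["style_context_codestyle"])  # context snippet and its style ID
    print(row["label"])  # assumed: 1 when the two snippets share a naming style, else 0

The raw rows of the dump follow, in the schema order above.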
"""simple docstring""" import sys _lowerCAmelCase : str = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : List[str] = 1 for digit in s: product *= int(_lowerCamelCase ) return product def lowerCamelCase_( _lowerCamelCase = N ) -> int: '''simple docstring''' _lowerCamelCase : List[str] = -sys.maxsize - 1 _lowerCamelCase : int = n[:13] _lowerCamelCase : int = 13 while cur_index < len(_lowerCamelCase ) - 13: if int(n[cur_index] ) >= int(substr[0] ): _lowerCamelCase : Optional[Any] = substr[1:] + n[cur_index] cur_index += 1 else: _lowerCamelCase : Tuple = max(_lowerCamelCase , str_eval(_lowerCamelCase ) ) _lowerCamelCase : int = n[cur_index : cur_index + 13] cur_index += 13 return largest_product if __name__ == "__main__": print(f'''{solution() = }''')
code_codestyle: 340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''', '''umberto-commoncrawl-cased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json''' ), '''umberto-wikipedia-uncased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json''' ), } class A_ ( _a ): lowerCAmelCase__ = 'camembert' def __init__( self: Tuple ,__lowerCAmelCase: Union[str, Any]=30_522 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Union[str, Any]=12 ,__lowerCAmelCase: int=12 ,__lowerCAmelCase: Optional[int]=3_072 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: int=512 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Any="absolute" ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: Dict ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[str] = vocab_size _lowerCamelCase : Any = hidden_size _lowerCamelCase : Union[str, Any] = num_hidden_layers _lowerCamelCase : str = num_attention_heads _lowerCamelCase : List[Any] = hidden_act _lowerCamelCase : int = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : str = type_vocab_size _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Union[str, Any] = layer_norm_eps _lowerCamelCase : Tuple = position_embedding_type _lowerCamelCase : List[Any] = use_cache _lowerCamelCase : Dict = classifier_dropout class A_ ( _a ): @property def _lowercase ( self: Any ): '''simple docstring''' if self.task == "multiple-choice": _lowerCamelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"} else: _lowerCamelCase : int = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
style_context_codestyle: 340
label: 1

"""simple docstring""" import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin _lowerCAmelCase : int = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''') class A_ ( _a , unittest.TestCase ): lowerCAmelCase__ = BartphoTokenizer lowerCAmelCase__ = False lowerCAmelCase__ = True def _lowercase ( self: Union[str, Any] ): '''simple docstring''' super().setUp() _lowerCamelCase : Any = ["▁This", "▁is", "▁a", "▁t", "est"] _lowerCamelCase : List[str] = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) ) _lowerCamelCase : List[str] = {"unk_token": "<unk>"} _lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["monolingual_vocab_file"] ) with open(self.monolingual_vocab_file ,"w" ,encoding="utf-8" ) as fp: for token in vocab_tokens: fp.write(F"""{token} {vocab_tokens[token]}\n""" ) _lowerCamelCase : Union[str, Any] = BartphoTokenizer(__lowerCAmelCase ,self.monolingual_vocab_file ,**self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase ( self: Any ,**__lowerCAmelCase: List[Any] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname ,**__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = "This is a là test" _lowerCamelCase : int = "This is a<unk><unk> test" return input_text, output_text def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : List[Any] = BartphoTokenizer(__lowerCAmelCase ,self.monolingual_vocab_file ,**self.special_tokens_map ) _lowerCamelCase : Union[str, Any] = "This is a là test" _lowerCamelCase : List[str] = "▁This ▁is ▁a ▁l à ▁t est".split() _lowerCamelCase : Tuple = tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Optional[int] = tokens + [tokenizer.unk_token] _lowerCamelCase : Any = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,__lowerCAmelCase )
code_codestyle: 340
"""simple docstring""" from collections import defaultdict def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : str = True for v in tree[start]: if v not in visited: ret += dfs(_lowerCamelCase ) if ret % 2 == 0: cuts.append(_lowerCamelCase ) return ret def lowerCamelCase_( ) -> int: '''simple docstring''' dfs(1 ) if __name__ == "__main__": _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = 10, 9 _lowerCAmelCase : str = defaultdict(list) _lowerCAmelCase : dict[int, bool] = {} _lowerCAmelCase : list[int] = [] _lowerCAmelCase : Any = 0 _lowerCAmelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
style_context_codestyle: 340
label: 1

"""simple docstring""" from __future__ import annotations import math def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _lowerCamelCase : str = u for i in range(1 , _lowerCamelCase ): _lowerCamelCase : List[Any] = temp * (u - i) return temp def lowerCamelCase_( ) -> None: '''simple docstring''' _lowerCamelCase : List[Any] = int(input("enter the numbers of values: " ) ) _lowerCamelCase : list[list[float]] = [] for _ in range(_lowerCamelCase ): y.append([] ) for i in range(_lowerCamelCase ): for j in range(_lowerCamelCase ): y[i].append(_lowerCamelCase ) _lowerCamelCase : List[str] = 0 print("enter the values of parameters in a list: " ) _lowerCamelCase : Union[str, Any] = list(map(_lowerCamelCase , input().split() ) ) print("enter the values of corresponding parameters: " ) for i in range(_lowerCamelCase ): _lowerCamelCase : Union[str, Any] = float(input() ) _lowerCamelCase : Optional[Any] = int(input("enter the value to interpolate: " ) ) _lowerCamelCase : Tuple = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , _lowerCamelCase ): for j in range(n - i ): _lowerCamelCase : Any = y[j + 1][i - 1] - y[j][i - 1] _lowerCamelCase : Optional[int] = y[0][0] for i in range(1 , _lowerCamelCase ): summ += (ucal(_lowerCamelCase , _lowerCamelCase ) * y[0][i]) / math.factorial(_lowerCamelCase ) print(F"""the value at {value} is {summ}""" ) if __name__ == "__main__": main()
code_codestyle: 340
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder _lowerCAmelCase : Optional[int] = '''__DUMMY_TRANSFORMERS_USER__''' _lowerCAmelCase : Dict = '''Dummy User''' _lowerCAmelCase : Optional[int] = '''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''' _lowerCAmelCase : Tuple = '''https://hub-ci.huggingface.co''' _lowerCAmelCase : Any = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}''' _lowerCAmelCase : Tuple = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}''' _lowerCAmelCase : Dict = Path('''~/.huggingface/hub_ci_token''').expanduser() @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' monkeypatch.setattr("datasets.config.HF_ENDPOINT" , _lowerCamelCase ) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' HfFolder.save_token(_lowerCamelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="session" ) def lowerCamelCase_( ) -> str: '''simple docstring''' return HfApi(endpoint=_lowerCamelCase ) @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Dict = HfFolder.get_token() HfFolder.save_token(_lowerCamelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' def _cleanup_repo(_lowerCamelCase ): hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) return _cleanup_repo @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> List[str]: '''simple docstring''' @contextmanager def _temporary_repo(_lowerCamelCase ): try: yield repo_id finally: cleanup_repo(_lowerCamelCase ) return _temporary_repo @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Tuple = F"""repo_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[str] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : Dict = 
F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Any = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[Any] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
style_context_codestyle: 340
label: 1

"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path _lowerCAmelCase : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) _lowerCAmelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase] _lowerCAmelCase : set[int] = {ord(char) for char in VALID_CHARS} _lowerCAmelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str | None: '''simple docstring''' _lowerCamelCase : str = "" _lowerCamelCase : int _lowerCamelCase : int _lowerCamelCase : int for keychar, cipherchar in zip(cycle(_lowerCamelCase ) , _lowerCamelCase ): _lowerCamelCase : Dict = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(_lowerCamelCase ) return decoded def lowerCamelCase_( _lowerCamelCase ) -> list[str]: '''simple docstring''' _lowerCamelCase : list[str] = [] for key in product(_lowerCamelCase , repeat=3 ): _lowerCamelCase : Union[str, Any] = try_key(_lowerCamelCase , _lowerCamelCase ) if encoded is not None: possibles.append(_lowerCamelCase ) return possibles def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]: '''simple docstring''' return [possible for possible in possibles if common_word in possible.lower()] def lowerCamelCase_( _lowerCamelCase = "p059_cipher.txt" ) -> int: '''simple docstring''' _lowerCamelCase : list[int] _lowerCamelCase : list[str] _lowerCamelCase : str _lowerCamelCase : str _lowerCamelCase : str = Path(_lowerCamelCase ).parent.joinpath(_lowerCamelCase ).read_text(encoding="utf-8" ) _lowerCamelCase : List[Any] = [int(_lowerCamelCase ) for number in data.strip().split("," )] _lowerCamelCase : List[str] = filter_valid_chars(_lowerCamelCase ) for common_word in COMMON_WORDS: _lowerCamelCase : int = filter_common_word(_lowerCamelCase , _lowerCamelCase ) if len(_lowerCamelCase ) == 1: break _lowerCamelCase : Dict = possibles[0] return sum(ord(_lowerCamelCase ) for char in decoded_text ) if __name__ == "__main__": print(f'''{solution() = }''')
code_codestyle: 340
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) class A_ ( _a ): def __init__( self: List[Any] ,__lowerCAmelCase: Union[List[ControlNetModel], Tuple[ControlNetModel]] ): '''simple docstring''' super().__init__() _lowerCamelCase : Tuple = nn.ModuleList(__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Union[torch.Tensor, float, int] ,__lowerCAmelCase: torch.Tensor ,__lowerCAmelCase: List[torch.tensor] ,__lowerCAmelCase: List[float] ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[Dict[str, Any]] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(__lowerCAmelCase ,__lowerCAmelCase ,self.nets ) ): _lowerCamelCase, _lowerCamelCase : Union[str, Any] = controlnet( __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,) # merge samples if i == 0: _lowerCamelCase, _lowerCamelCase : Optional[Any] = down_samples, mid_sample else: _lowerCamelCase : Optional[int] = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__lowerCAmelCase ,__lowerCAmelCase ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Union[str, os.PathLike] ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Callable = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[str] = None ,): '''simple docstring''' _lowerCamelCase : List[Any] = 0 _lowerCamelCase : str = save_directory for controlnet in self.nets: controlnet.save_pretrained( __lowerCAmelCase ,is_main_process=__lowerCAmelCase ,save_function=__lowerCAmelCase ,safe_serialization=__lowerCAmelCase ,variant=__lowerCAmelCase ,) idx += 1 _lowerCamelCase : int = model_path_to_save + F"""_{idx}""" @classmethod def _lowercase ( cls: Any ,__lowerCAmelCase: Optional[Union[str, os.PathLike]] ,**__lowerCAmelCase: int ): '''simple docstring''' _lowerCamelCase : int = 0 _lowerCamelCase : str = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... _lowerCamelCase : Dict = pretrained_model_path while os.path.isdir(__lowerCAmelCase ): _lowerCamelCase : List[Any] = ControlNetModel.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase ) controlnets.append(__lowerCAmelCase ) idx += 1 _lowerCamelCase : Tuple = pretrained_model_path + F"""_{idx}""" logger.info(F"""{len(__lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.""" ) if len(__lowerCAmelCase ) == 0: raise ValueError( F"""No ControlNets found under {os.path.dirname(__lowerCAmelCase )}. Expected at least {pretrained_model_path + '_0'}.""" ) return cls(__lowerCAmelCase )
style_context_codestyle: 340
label: 1

"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} _lowerCAmelCase : Union[str, Any] = { '''vocab_file''': { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt''' ), } } _lowerCAmelCase : int = { '''junnyu/roformer_chinese_small''': 1536, '''junnyu/roformer_chinese_base''': 1536, '''junnyu/roformer_chinese_char_small''': 512, '''junnyu/roformer_chinese_char_base''': 512, '''junnyu/roformer_small_discriminator''': 128, '''junnyu/roformer_small_generator''': 128, } _lowerCAmelCase : List[Any] = { '''junnyu/roformer_chinese_small''': {'''do_lower_case''': True}, '''junnyu/roformer_chinese_base''': {'''do_lower_case''': True}, '''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True}, '''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True}, '''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True}, '''junnyu/roformer_small_generator''': {'''do_lower_case''': True}, } class A_ ( _a ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION lowerCAmelCase__ = RoFormerTokenizer def __init__( self: int ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Optional[Any]="[UNK]" ,__lowerCAmelCase: Any="[SEP]" ,__lowerCAmelCase: Optional[int]="[PAD]" ,__lowerCAmelCase: List[Any]="[CLS]" ,__lowerCAmelCase: Any="[MASK]" ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: List[Any]=None ,**__lowerCAmelCase: Union[str, Any] ,): '''simple docstring''' super().__init__( __lowerCAmelCase ,tokenizer_file=__lowerCAmelCase ,do_lower_case=__lowerCAmelCase ,unk_token=__lowerCAmelCase ,sep_token=__lowerCAmelCase ,pad_token=__lowerCAmelCase ,cls_token=__lowerCAmelCase ,mask_token=__lowerCAmelCase ,tokenize_chinese_chars=__lowerCAmelCase ,strip_accents=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get("lowercase" ,__lowerCAmelCase ) != do_lower_case or pre_tok_state.get("strip_accents" ,__lowerCAmelCase ) != strip_accents ): _lowerCamelCase : Any = getattr(__lowerCAmelCase ,pre_tok_state.pop("type" ) ) _lowerCamelCase : List[str] = do_lower_case _lowerCamelCase : Dict = strip_accents _lowerCamelCase : 
int = pre_tok_class(**__lowerCAmelCase ) _lowerCamelCase : int = do_lower_case def __getstate__( self: Any ): '''simple docstring''' _lowerCamelCase : str = self.__dict__.copy() _lowerCamelCase : Optional[Any] = BertPreTokenizer() return state def __setstate__( self: Union[str, Any] ,__lowerCAmelCase: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Any = d _lowerCamelCase : List[str] = self.__dict__["_tokenizer"].get_vocab() _lowerCamelCase : int = PreTokenizer.custom(JiebaPreTokenizer(__lowerCAmelCase ) ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Optional[int]=None ): '''simple docstring''' _lowerCamelCase : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowercase ( self: Tuple ,__lowerCAmelCase: List[int] ,__lowerCAmelCase: Optional[List[int]] = None ): '''simple docstring''' _lowerCamelCase : Dict = [self.sep_token_id] _lowerCamelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[str] = None ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self._tokenizer.model.save(__lowerCAmelCase ,name=__lowerCAmelCase ) return tuple(__lowerCAmelCase ) def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: Tuple=False ,**__lowerCAmelCase: int ,): '''simple docstring''' _lowerCamelCase : int = BertPreTokenizer() return super().save_pretrained(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,**__lowerCAmelCase )
code_codestyle: 340
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : int = logging.get_logger(__name__) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]: '''simple docstring''' _lowerCamelCase : Tuple = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("head" ): _lowerCamelCase : Tuple = "segformer.encoder." + key if key.startswith("backbone" ): _lowerCamelCase : Any = key.replace("backbone" , "segformer.encoder" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _lowerCamelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )] _lowerCamelCase : int = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_lowerCamelCase )-1}""" ) if "norm" in key: _lowerCamelCase : Optional[Any] = key.replace("norm" , "layer_norm" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _lowerCamelCase : Dict = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )] _lowerCamelCase : Tuple = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_lowerCamelCase )-1}""" ) if "layer_norm1" in key: _lowerCamelCase : Union[str, Any] = key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: _lowerCamelCase : int = key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 _lowerCamelCase : Union[str, Any] = key[key.find("block" ) + len("block" )] _lowerCamelCase : Optional[Any] = key.replace(F"""block{idx}""" , F"""block.{int(_lowerCamelCase )-1}""" ) if "attn.q" in key: _lowerCamelCase : Optional[int] = key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: _lowerCamelCase : List[str] = key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: _lowerCamelCase : Tuple = key.replace("attn" , "attention.self" ) if "fc1" in key: _lowerCamelCase : Optional[Any] = key.replace("fc1" , "dense1" ) if "fc2" in key: _lowerCamelCase : Dict = key.replace("fc2" , "dense2" ) if "linear_pred" in key: _lowerCamelCase : int = key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: _lowerCamelCase : str = key.replace("linear_fuse.conv" , "linear_fuse" ) _lowerCamelCase : Optional[Any] = key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _lowerCamelCase : Union[str, Any] = key[key.find("linear_c" ) + len("linear_c" )] _lowerCamelCase : Optional[int] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_lowerCamelCase )-1}""" ) if key.startswith("head" ): _lowerCamelCase : List[str] = key.replace("head" , "classifier" ) _lowerCamelCase : Union[str, Any] = value return new_state_dict def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" ) _lowerCamelCase : Optional[Any] = 
state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict _lowerCamelCase : int = kv_weight[ : config.hidden_sizes[i], : ] _lowerCamelCase : int = kv_bias[: config.hidden_sizes[i]] _lowerCamelCase : Optional[int] = kv_weight[ config.hidden_sizes[i] :, : ] _lowerCamelCase : Optional[Any] = kv_bias[ config.hidden_sizes[i] : ] def lowerCamelCase_( ) -> Dict: '''simple docstring''' _lowerCamelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCamelCase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return image @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : Any = SegformerConfig() _lowerCamelCase : int = False # set attributes based on model_name _lowerCamelCase : Any = "huggingface/label-files" if "segformer" in model_name: _lowerCamelCase : str = model_name[len("segformer." ) : len("segformer." ) + 2] if "ade" in model_name: _lowerCamelCase : str = 150 _lowerCamelCase : Dict = "ade20k-id2label.json" _lowerCamelCase : Dict = (1, 150, 128, 128) elif "city" in model_name: _lowerCamelCase : List[str] = 19 _lowerCamelCase : Tuple = "cityscapes-id2label.json" _lowerCamelCase : Tuple = (1, 19, 128, 128) else: raise ValueError(F"""Model {model_name} not supported""" ) elif "mit" in model_name: _lowerCamelCase : List[str] = True _lowerCamelCase : Tuple = model_name[4:6] _lowerCamelCase : Tuple = 1000 _lowerCamelCase : List[Any] = "imagenet-1k-id2label.json" _lowerCamelCase : List[Any] = (1, 1000) else: raise ValueError(F"""Model {model_name} not supported""" ) # set config attributes _lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowerCamelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase : Optional[Any] = idalabel _lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : int = 256 elif size == "b2": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : List[Any] = 768 _lowerCamelCase : Any = [3, 4, 6, 3] elif size == "b3": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : Union[str, Any] = 768 _lowerCamelCase : Optional[Any] = [3, 4, 18, 3] elif size == "b4": _lowerCamelCase : str = [64, 128, 320, 512] _lowerCamelCase : Optional[Any] = 768 _lowerCamelCase : Dict = [3, 8, 27, 3] elif size == "b5": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : Tuple = 768 _lowerCamelCase : Tuple = [3, 6, 40, 3] else: raise ValueError(F"""Size {size} not supported""" ) # load image processor (only resize + normalize) _lowerCamelCase : Dict = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase ) # prepare image _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values logger.info(F"""Converting model {model_name}...""" ) # load original state dict if encoder_only: _lowerCamelCase : Tuple = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) ) else: _lowerCamelCase : int = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )["state_dict"] # rename keys _lowerCamelCase : str = rename_keys(_lowerCamelCase , 
encoder_only=_lowerCamelCase ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(_lowerCamelCase , _lowerCamelCase ) # create HuggingFace model and load state dict if encoder_only: _lowerCamelCase : Tuple = False _lowerCamelCase : Optional[int] = SegformerForImageClassification(_lowerCamelCase ) else: _lowerCamelCase : List[str] = SegformerForSemanticSegmentation(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) model.eval() # forward pass _lowerCamelCase : Any = model(_lowerCamelCase ) _lowerCamelCase : Dict = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": _lowerCamelCase : str = torch.tensor( [ [[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]], [[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]], [[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]], [[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]], [[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": _lowerCamelCase : int = torch.tensor( [ [[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]], [[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]], [[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": _lowerCamelCase : Optional[Any] = torch.tensor( [ [[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]], [[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]], [[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]], [[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]], [[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]], [[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]], 
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": _lowerCamelCase : Dict = torch.tensor( [ [[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]], [[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]], [[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": _lowerCamelCase : Optional[int] = torch.tensor( [ [[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]], [[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]], [[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [ [-1.13_72e01, -1.27_87e01, -1.34_77e01], [-1.25_36e01, -1.41_94e01, -1.44_09e01], [-1.32_17e01, -1.48_88e01, -1.53_27e01], ], [ [-1.47_91e01, -1.71_22e01, -1.82_77e01], [-1.71_63e01, -1.91_92e01, -1.95_33e01], [-1.78_97e01, -1.99_91e01, -2.03_15e01], ], [ [7.67_23e-01, 4.19_21e-01, -7.78_78e-02], [4.77_72e-01, 9.55_57e-03, -2.80_82e-01], [3.60_32e-01, -2.48_26e-01, -5.11_68e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": _lowerCamelCase : Union[str, Any] = torch.tensor( [ [[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]], [[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]], [[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": _lowerCamelCase : List[Any] = torch.tensor( [ [[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]], [[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]], [[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]], [[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]], [[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": _lowerCamelCase : Any = torch.tensor( [ [[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]], [[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]], [[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]], ] ) 
elif model_name == "segformer.b4.1024x1024.city.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]], [[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]], [[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": _lowerCamelCase : str = torch.tensor( [ [[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]], [[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]], [[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]], ] ) else: _lowerCamelCase : Dict = logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-2 ) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''segformer.b0.512x512.ade.160k''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowerCAmelCase : str = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
style_context_codestyle: 340
label: 1

"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _lowerCAmelCase : str = 16 _lowerCAmelCase : List[Any] = 32 def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = 16 ) -> Tuple: '''simple docstring''' _lowerCamelCase : int = AutoTokenizer.from_pretrained("bert-base-cased" ) _lowerCamelCase : Union[str, Any] = load_dataset("glue" , "mrpc" ) def tokenize_function(_lowerCamelCase ): # max_length=None => use the model max length (it's actually the default) _lowerCamelCase : List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCamelCase , max_length=_lowerCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _lowerCamelCase : int = datasets.map( _lowerCamelCase , batched=_lowerCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowerCamelCase : Optional[Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(_lowerCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. _lowerCamelCase : Optional[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _lowerCamelCase : List[Any] = 16 elif accelerator.mixed_precision != "no": _lowerCamelCase : Optional[Any] = 8 else: _lowerCamelCase : str = None return tokenizer.pad( _lowerCamelCase , padding="longest" , max_length=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_tensors="pt" , ) # Instantiate dataloaders. 
_lowerCamelCase : List[Any] = DataLoader( tokenized_datasets["train"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase ) _lowerCamelCase : Optional[Any] = DataLoader( tokenized_datasets["validation"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders _lowerCAmelCase : Tuple = mocked_dataloaders # noqa: F811 def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCamelCase ) == "1": _lowerCamelCase : Union[str, Any] = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: _lowerCamelCase : Tuple = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir ) else: _lowerCamelCase : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCamelCase : str = config["lr"] _lowerCamelCase : Optional[int] = int(config["num_epochs"] ) _lowerCamelCase : List[str] = int(config["seed"] ) _lowerCamelCase : Optional[Any] = int(config["batch_size"] ) set_seed(_lowerCamelCase ) _lowerCamelCase, _lowerCamelCase : List[Any] = get_dataloaders(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : str = evaluate.load("glue" , "mrpc" ) # If the batch size is too big we use gradient accumulation _lowerCamelCase : Tuple = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _lowerCamelCase : List[Any] = batch_size // MAX_GPU_BATCH_SIZE _lowerCamelCase : Optional[Any] = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCamelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _lowerCamelCase : List[str] = model.to(accelerator.device ) # Instantiate optimizer _lowerCamelCase : Tuple = AdamW(params=model.parameters() , lr=_lowerCamelCase ) # Instantiate scheduler _lowerCamelCase : Dict = get_linear_schedule_with_warmup( optimizer=_lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCamelCase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = accelerator.prepare( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # New Code # # We need to initialize the trackers we use. 
Overall configurations can also be stored if args.with_tracking: _lowerCamelCase : Any = os.path.split(_lowerCamelCase )[-1].split("." )[0] accelerator.init_trackers(_lowerCamelCase , _lowerCamelCase ) # Now we train the model for epoch in range(_lowerCamelCase ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: _lowerCamelCase : Optional[int] = 0 for step, batch in enumerate(_lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _lowerCamelCase : Optional[int] = model(**_lowerCamelCase ) _lowerCamelCase : str = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() _lowerCamelCase : int = loss / gradient_accumulation_steps accelerator.backward(_lowerCamelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): _lowerCamelCase : str = model(**_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = outputs.logits.argmax(dim=-1 ) _lowerCamelCase, _lowerCamelCase : Optional[int] = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=_lowerCamelCase , references=_lowerCamelCase , ) _lowerCamelCase : Any = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , _lowerCamelCase ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { "accuracy": eval_metric["accuracy"], "f1": eval_metric["f1"], "train_loss": total_loss.item() / len(_lowerCamelCase ), "epoch": epoch, } , step=_lowerCamelCase , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def lowerCamelCase_( ) -> int: '''simple docstring''' _lowerCamelCase : Tuple = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=_lowerCamelCase , default=_lowerCamelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) parser.add_argument( "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , ) parser.add_argument( "--project_dir" , type=_lowerCamelCase , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , ) _lowerCamelCase : List[Any] = parser.parse_args() _lowerCamelCase : Optional[Any] = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(_lowerCamelCase , _lowerCamelCase ) if __name__ == "__main__": main()
code_codestyle: 340
"""simple docstring""" _lowerCAmelCase : dict[tuple[int, int, int], int] = {} def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on _lowerCamelCase : Optional[int] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one _lowerCamelCase : int = _calculate(days - 1 , _lowerCamelCase , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 _lowerCamelCase : Tuple = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter _lowerCamelCase : str = _calculate(days - 1 , _lowerCamelCase , 0 ) _lowerCamelCase : List[Any] = state_late + state_absent + state_ontime _lowerCamelCase : int = prizestrings return prizestrings def lowerCamelCase_( _lowerCamelCase = 30 ) -> int: '''simple docstring''' return _calculate(_lowerCamelCase , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
style_context_codestyle: 340
label: 1

"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax _lowerCAmelCase : Tuple = logging.get_logger(__name__) @add_end_docstrings(_a ) class A_ ( _a ): def __init__( self: Dict ,**__lowerCAmelCase: str ): '''simple docstring''' super().__init__(**__lowerCAmelCase ) requires_backends(self ,"vision" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self: Any ,__lowerCAmelCase: Union[str, List[str], "Image", List["Image"]] ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' return super().__call__(__lowerCAmelCase ,**__lowerCAmelCase ) def _lowercase ( self: Dict ,**__lowerCAmelCase: List[Any] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = {} if "candidate_labels" in kwargs: _lowerCamelCase : Dict = kwargs["candidate_labels"] if "hypothesis_template" in kwargs: _lowerCamelCase : Any = kwargs["hypothesis_template"] return preprocess_params, {}, {} def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Optional[int]="This is a photo of {}." ): '''simple docstring''' _lowerCamelCase : Optional[int] = load_image(__lowerCAmelCase ) _lowerCamelCase : List[str] = self.image_processor(images=[image] ,return_tensors=self.framework ) _lowerCamelCase : Optional[Any] = candidate_labels _lowerCamelCase : Any = [hypothesis_template.format(__lowerCAmelCase ) for x in candidate_labels] _lowerCamelCase : str = self.tokenizer(__lowerCAmelCase ,return_tensors=self.framework ,padding=__lowerCAmelCase ) _lowerCamelCase : List[str] = [text_inputs] return inputs def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : Optional[int] = model_inputs.pop("candidate_labels" ) _lowerCamelCase : str = model_inputs.pop("text_inputs" ) if isinstance(text_inputs[0] ,__lowerCAmelCase ): _lowerCamelCase : Optional[Any] = text_inputs[0] else: # Batching case. 
_lowerCamelCase : Optional[int] = text_inputs[0][0] _lowerCamelCase : List[Any] = self.model(**__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : Dict = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_image, } return model_outputs def _lowercase ( self: int ,__lowerCAmelCase: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : int = model_outputs.pop("candidate_labels" ) _lowerCamelCase : Tuple = model_outputs["logits"][0] if self.framework == "pt": _lowerCamelCase : Union[str, Any] = logits.softmax(dim=-1 ).squeeze(-1 ) _lowerCamelCase : Optional[int] = probs.tolist() if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : str = [scores] elif self.framework == "tf": _lowerCamelCase : int = stable_softmax(__lowerCAmelCase ,axis=-1 ) _lowerCamelCase : int = probs.numpy().tolist() else: raise ValueError(F"""Unsupported framework: {self.framework}""" ) _lowerCamelCase : int = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(__lowerCAmelCase ,__lowerCAmelCase ) ,key=lambda __lowerCAmelCase : -x[0] ) ] return result
code_codestyle: 340
"""simple docstring""" from __future__ import annotations def lowerCamelCase_( _lowerCamelCase ) -> bool: '''simple docstring''' _lowerCamelCase : int = str(_lowerCamelCase ) return len(_lowerCamelCase ) == 9 and set(_lowerCamelCase ) == set("123456789" ) def lowerCamelCase_( ) -> int | None: '''simple docstring''' for base_num in range(9999 , 4999 , -1 ): _lowerCamelCase : Union[str, Any] = 100002 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate for base_num in range(333 , 99 , -1 ): _lowerCamelCase : Tuple = 1002003 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate return None if __name__ == "__main__": print(f'''{solution() = }''')
340
1
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = ( first_str_length if first_str_length > second_str_length else second_str_length ) _lowerCamelCase : list = [] for char_count in range(_lowerCamelCase ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(_lowerCamelCase ) if __name__ == "__main__": print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
340
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class A_ ( _a ): lowerCAmelCase__ = 'char' lowerCAmelCase__ = 'bpe' lowerCAmelCase__ = 'wp' _lowerCAmelCase : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class A_ ( _a ): lowerCAmelCase__ = ['image_processor', 'char_tokenizer'] lowerCAmelCase__ = 'ViTImageProcessor' lowerCAmelCase__ = 'MgpstrTokenizer' def __init__( self: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Optional[int]=None ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Any = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." ,__lowerCAmelCase ,) _lowerCamelCase : List[Any] = kwargs.pop("feature_extractor" ) _lowerCamelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) _lowerCamelCase : List[str] = tokenizer _lowerCamelCase : str = AutoTokenizer.from_pretrained("gpt2" ) _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(__lowerCAmelCase ,__lowerCAmelCase ) def __call__( self: Optional[int] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[Any]=None ,**__lowerCAmelCase: Tuple ): '''simple docstring''' if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." 
) if images is not None: _lowerCamelCase : Optional[int] = self.image_processor(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is not None: _lowerCamelCase : int = self.char_tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is None: return inputs elif images is None: return encodings else: _lowerCamelCase : Tuple = encodings["input_ids"] return inputs def _lowercase ( self: int ,__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = sequences _lowerCamelCase : Dict = char_preds.size(0 ) _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._decode_helper(__lowerCAmelCase ,"char" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = self._decode_helper(__lowerCAmelCase ,"bpe" ) _lowerCamelCase, _lowerCamelCase : Tuple = self._decode_helper(__lowerCAmelCase ,"wp" ) _lowerCamelCase : List[str] = [] _lowerCamelCase : str = [] for i in range(__lowerCAmelCase ): _lowerCamelCase : str = [char_scores[i], bpe_scores[i], wp_scores[i]] _lowerCamelCase : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] _lowerCamelCase : Optional[Any] = scores.index(max(__lowerCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _lowerCamelCase : Tuple = {} _lowerCamelCase : Tuple = final_strs _lowerCamelCase : int = final_scores _lowerCamelCase : str = char_strs _lowerCamelCase : Dict = bpe_strs _lowerCamelCase : int = wp_strs return out def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ): '''simple docstring''' if format == DecodeType.CHARACTER: _lowerCamelCase : int = self.char_decode _lowerCamelCase : List[str] = 1 _lowerCamelCase : Optional[int] = "[s]" elif format == DecodeType.BPE: _lowerCamelCase : Dict = self.bpe_decode _lowerCamelCase : str = 2 _lowerCamelCase : Union[str, Any] = "#" elif format == DecodeType.WORDPIECE: _lowerCamelCase : int = self.wp_decode _lowerCamelCase : List[str] = 102 _lowerCamelCase : List[Any] = "[SEP]" else: raise ValueError(F"""Format {format} is not supported.""" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = [], [] _lowerCamelCase : Any = pred_logits.size(0 ) _lowerCamelCase : int = pred_logits.size(1 ) _lowerCamelCase, _lowerCamelCase : List[Any] = pred_logits.topk(1 ,dim=-1 ,largest=__lowerCAmelCase ,sorted=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_index.view(-1 ,__lowerCAmelCase )[:, 1:] _lowerCamelCase : List[str] = decoder(__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : str = torch.nn.functional.softmax(__lowerCAmelCase ,dim=2 ).max(dim=2 ) _lowerCamelCase : Any = preds_max_prob[:, 1:] for index in range(__lowerCAmelCase ): _lowerCamelCase : List[Any] = preds_str[index].find(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_str[index][:pred_eos] _lowerCamelCase : Optional[Any] = preds_index[index].cpu().tolist() _lowerCamelCase : List[str] = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1 _lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1] _lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__lowerCAmelCase ) conf_scores.append(__lowerCAmelCase ) return dec_strs, conf_scores def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : str = [seq.replace(" " ,"" ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs def _lowercase ( 
self: List[str] ,__lowerCAmelCase: List[str] ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(__lowerCAmelCase ) def _lowercase ( self: Tuple ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = [seq.replace(" " ,"" ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs
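# A usage sketch for the MGP-STR processor above, assuming network access and
# the public "alibaba-damo/mgp-str-base" checkpoint; the processor fuses the
# character, BPE and wordpiece heads, and batch_decode keeps the string with
# the highest confidence among the three.
import requests
import torch
from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

pixel_values = processor(images=image, return_tensors="pt").pixel_values
with torch.no_grad():
    outputs = model(pixel_values)
# outputs.logits is a tuple of (char, bpe, wordpiece) predictions
decoded = processor.batch_decode(outputs.logits)
print(decoded["generated_text"])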
340
1
"""simple docstring""" import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : str = UniSpeechSatForSequenceClassification.from_pretrained(_lowerCamelCase , config=_lowerCamelCase ) _lowerCamelCase : int = downstream_dict["projector.weight"] _lowerCamelCase : List[str] = downstream_dict["projector.bias"] _lowerCamelCase : Optional[Any] = downstream_dict["model.post_net.linear.weight"] _lowerCamelCase : Union[str, Any] = downstream_dict["model.post_net.linear.bias"] return model def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Union[str, Any] = UniSpeechSatForAudioFrameClassification.from_pretrained(_lowerCamelCase , config=_lowerCamelCase ) _lowerCamelCase : Optional[int] = downstream_dict["model.linear.weight"] _lowerCamelCase : List[Any] = downstream_dict["model.linear.bias"] return model def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : int = UniSpeechSatForXVector.from_pretrained(_lowerCamelCase , config=_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = downstream_dict["connector.weight"] _lowerCamelCase : List[str] = downstream_dict["connector.bias"] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): _lowerCamelCase : List[Any] = downstream_dict[ F"""model.framelevel_feature_extractor.module.{i}.kernel.weight""" ] _lowerCamelCase : List[str] = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""] _lowerCamelCase : Optional[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"] _lowerCamelCase : int = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"] _lowerCamelCase : Optional[int] = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"] _lowerCamelCase : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"] _lowerCamelCase : Dict = downstream_dict["objective.W"] return model @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Optional[Any] = torch.load(_lowerCamelCase , map_location="cpu" ) _lowerCamelCase : Any = checkpoint["Downstream"] _lowerCamelCase : Tuple = UniSpeechSatConfig.from_pretrained(_lowerCamelCase ) _lowerCamelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained( _lowerCamelCase , return_attention_mask=_lowerCamelCase , do_normalize=_lowerCamelCase ) _lowerCamelCase : List[str] = hf_config.architectures[0] if arch.endswith("ForSequenceClassification" ): _lowerCamelCase : List[str] = convert_classification(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) elif arch.endswith("ForAudioFrameClassification" ): _lowerCamelCase : Optional[int] = convert_diarization(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) elif arch.endswith("ForXVector" ): _lowerCamelCase : List[Any] = convert_xvector(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else: raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" ) if hf_config.use_weighted_layer_sum: 
_lowerCamelCase : Any = checkpoint["Featurizer"]["weights"] hf_feature_extractor.save_pretrained(_lowerCamelCase ) hf_model.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument( '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.''' ) parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''') parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''') _lowerCAmelCase : Optional[int] = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
340
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version _lowerCAmelCase : List[Any] = get_logger(__name__) class A_ : lowerCAmelCase__ = 'dummy_data' lowerCAmelCase__ = 'datasets' lowerCAmelCase__ = False def __init__( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[Version, str] ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[List[Callable]] = None ,): '''simple docstring''' _lowerCamelCase : str = 0 _lowerCamelCase : List[str] = dataset_name _lowerCamelCase : Optional[int] = cache_dir _lowerCamelCase : Optional[int] = use_local_dummy_data _lowerCamelCase : int = config # download_callbacks take a single url as input _lowerCamelCase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _lowerCamelCase : int = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _lowerCamelCase : Tuple = str(__lowerCAmelCase ) # to be downloaded _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Dict = None @property def _lowercase ( self: str ): '''simple docstring''' if self._dummy_file is None: _lowerCamelCase : List[str] = self.download_dummy_data() return self._dummy_file @property def _lowercase ( self: str ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join("dummy" ,self.version_name ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' return os.path.join(self.dummy_data_folder ,"dummy_data.zip" ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Dict = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _lowerCamelCase : Optional[int] = cached_path( __lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=__lowerCAmelCase ,force_extract=__lowerCAmelCase ) return os.path.join(__lowerCAmelCase ,self.dummy_file_name ) @property def _lowercase ( self: Tuple ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def _lowercase ( self: List[str] ): '''simple docstring''' if self._bucket_url is None: _lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) ) return self._bucket_url @property def _lowercase ( self: Union[str, Any] ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _lowerCamelCase : Tuple = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _lowerCamelCase : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase ) elif isinstance(__lowerCAmelCase ,(list, tuple) ): return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase ) else: return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' return path def _lowercase ( self: Optional[int] ): '''simple docstring''' return {} def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : str = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): for single_url in single_urls: download_callback(__lowerCAmelCase ) else: _lowerCamelCase : Union[str, Any] = single_urls download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) for x in single_urls] else: _lowerCamelCase : Union[str, Any] = single_urls _lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) _lowerCamelCase : List[Any] = value # make sure that values are unique if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Dict = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _lowerCamelCase : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,__lowerCAmelCase ) ) for url in data_url ) _lowerCamelCase : Optional[Any] = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _lowerCamelCase : Tuple = [data_url[0]] * len(__lowerCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the 
url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__lowerCAmelCase ) return dummy_data_list def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__lowerCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _lowercase ( self: Optional[Any] ): '''simple docstring''' pass def _lowercase ( self: Optional[int] ): '''simple docstring''' pass def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' def _iter_archive_members(__lowerCAmelCase: Any ): # this preserves the order of the members inside the ZIP archive _lowerCamelCase : Tuple = Path(self.dummy_file ).parent _lowerCamelCase : str = path.relative_to(__lowerCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _lowerCamelCase : Optional[int] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = Path(__lowerCAmelCase ) _lowerCamelCase : int = _iter_archive_members(__lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__lowerCAmelCase ).as_posix(), file_path.open("rb" ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : List[Any] = [paths] for path in paths: if os.path.isfile(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__lowerCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
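# The dummy-data manager above derives local file names from the last URL path
# segment, quoting it so query strings stay filesystem-safe. A small sketch of
# that naming rule in isolation:
import urllib.parse
from pathlib import Path

url = "https://example.com/data/train.csv?version=2"
safe_name = urllib.parse.quote_plus(Path(url).name)
print(safe_name)  # train.csv%3Fversion%3D2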
340
1
"""simple docstring""" import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class A_ ( _a ): def __init__( self: Optional[int] ,__lowerCAmelCase: str ,__lowerCAmelCase: str=13 ,__lowerCAmelCase: Optional[Any]=7 ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: int=True ,__lowerCAmelCase: Optional[int]=True ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: int=99 ,__lowerCAmelCase: Optional[Any]=32 ,__lowerCAmelCase: int=5 ,__lowerCAmelCase: Optional[int]=4 ,__lowerCAmelCase: List[Any]=37 ,__lowerCAmelCase: List[str]="gelu" ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: int=0.1 ,__lowerCAmelCase: str=512 ,__lowerCAmelCase: int=16 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: int=False ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Union[str, Any]="None" ,__lowerCAmelCase: Any=3 ,__lowerCAmelCase: Any=4 ,__lowerCAmelCase: Tuple=None ,): '''simple docstring''' _lowerCamelCase : int = parent _lowerCamelCase : Any = batch_size _lowerCamelCase : List[Any] = seq_length _lowerCamelCase : Dict = is_training _lowerCamelCase : Union[str, Any] = use_input_mask _lowerCamelCase : List[str] = use_token_type_ids _lowerCamelCase : List[Any] = use_labels _lowerCamelCase : List[str] = vocab_size _lowerCamelCase : Any = hidden_size _lowerCamelCase : Union[str, Any] = num_hidden_layers _lowerCamelCase : Dict = num_attention_heads _lowerCamelCase : Any = intermediate_size _lowerCamelCase : Optional[int] = hidden_act _lowerCamelCase : Dict = hidden_dropout_prob _lowerCamelCase : Optional[Any] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : Optional[Any] = type_vocab_size _lowerCamelCase : int = type_sequence_label_size _lowerCamelCase : List[str] = initializer_range _lowerCamelCase : Optional[int] = num_labels _lowerCamelCase : Tuple = num_choices _lowerCamelCase : Union[str, Any] = relative_attention _lowerCamelCase : Any = position_biased_input _lowerCamelCase : List[str] = pos_att_type _lowerCamelCase : int = scope def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) _lowerCamelCase : Union[str, Any] = None if self.use_input_mask: _lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) _lowerCamelCase : str = None if self.use_token_type_ids: _lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) _lowerCamelCase : int = None _lowerCamelCase : int = None _lowerCamelCase : str = None if self.use_labels: _lowerCamelCase : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) _lowerCamelCase : str = ids_tensor([self.batch_size] ,self.num_choices ) _lowerCamelCase : List[Any] = 
self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase ( self: List[str] ): '''simple docstring''' return DebertaVaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,pos_att_type=self.pos_att_type ,) def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Tuple ): '''simple docstring''' self.parent.assertListEqual(list(result.loss.size() ) ,[] ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: int ): '''simple docstring''' _lowerCamelCase : int = DebertaVaModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Any = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase )[0] _lowerCamelCase : List[Any] = model(__lowerCAmelCase ,token_type_ids=__lowerCAmelCase )[0] _lowerCamelCase : List[Any] = model(__lowerCAmelCase )[0] self.parent.assertListEqual(list(sequence_output.size() ) ,[self.batch_size, self.seq_length, self.hidden_size] ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: int ,__lowerCAmelCase: List[str] ): '''simple docstring''' _lowerCamelCase : Optional[Any] = DebertaVaForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : List[str] = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self: Tuple ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Dict = self.num_labels _lowerCamelCase : Union[str, Any] = DebertaVaForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Tuple = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,labels=__lowerCAmelCase ) self.parent.assertListEqual(list(result.logits.size() ) ,[self.batch_size, self.num_labels] ) self.check_loss_output(__lowerCAmelCase ) def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: int ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self.num_labels _lowerCamelCase : str = DebertaVaForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase 
,labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : List[Any] = DebertaVaForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Any = model( __lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,start_positions=__lowerCAmelCase ,end_positions=__lowerCAmelCase ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _lowercase ( self: Tuple ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Any ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase : List[str] = DebertaVaForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() _lowerCamelCase : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() _lowerCamelCase : Any = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() _lowerCamelCase : int = model( __lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,labels=__lowerCAmelCase ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase : List[str] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ) : List[Any] = config_and_inputs _lowerCamelCase : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class A_ ( _a , _a , unittest.TestCase ): lowerCAmelCase__ = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) lowerCAmelCase__ = ( { 'feature-extraction': DebertaVaModel, 'fill-mask': DebertaVaForMaskedLM, 'question-answering': DebertaVaForQuestionAnswering, 'text-classification': DebertaVaForSequenceClassification, 'token-classification': DebertaVaForTokenClassification, 'zero-shot': DebertaVaForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : List[str] = DebertaVaModelTester(self ) _lowerCamelCase : Union[str, Any] = ConfigTester(self ,config_class=__lowerCAmelCase ,hidden_size=37 ) def _lowercase ( self: List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*__lowerCAmelCase ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : 
Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__lowerCAmelCase ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__lowerCAmelCase ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__lowerCAmelCase ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__lowerCAmelCase ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*__lowerCAmelCase ) @slow def _lowercase ( self: Tuple ): '''simple docstring''' for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : str = DebertaVaModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch @require_sentencepiece @require_tokenizers class A_ ( unittest.TestCase ): @unittest.skip(reason="Model not available yet" ) def _lowercase ( self: int ): '''simple docstring''' pass @slow def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase : Tuple = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge" ) _lowerCamelCase : Optional[Any] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) _lowerCamelCase : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _lowerCamelCase : Tuple = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase )[0] # compare the actual values for a slice. _lowerCamelCase : Any = torch.tensor( [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,__lowerCAmelCase ,atol=1e-4 ) ,F"""{output[:, 1:4, 1:4]}""" )
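# A standalone sketch of the integration check above, assuming network access
# to the "microsoft/deberta-v2-xlarge" checkpoint:
import torch
from transformers import DebertaV2Model

model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    hidden_states = model(input_ids, attention_mask=attention_mask)[0]
print(hidden_states.shape)  # (1, 11, hidden_size)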
340
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise TypeError("Undefined for non-integers" ) elif precision < 1: raise ValueError("Undefined for non-natural numbers" ) _lowerCamelCase : int = precision _lowerCamelCase : Dict = ceil(precision / 14 ) _lowerCamelCase : Optional[Any] = 426880 * Decimal(10005 ).sqrt() _lowerCamelCase : int = 1 _lowerCamelCase : Optional[int] = 13591409 _lowerCamelCase : int = Decimal(_lowerCamelCase ) for k in range(1 , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowerCamelCase ) ** 3) linear_term += 545140134 exponential_term *= -262537412640768000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = 50 print(f'''The first {n} digits of pi is: {pi(n)}''')
340
1
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def lowerCamelCase_( _lowerCamelCase ) -> Any: '''simple docstring''' for param in module.parameters(): _lowerCamelCase : Optional[int] = False def lowerCamelCase_( ) -> List[str]: '''simple docstring''' _lowerCamelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _lowerCamelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Dict = plt.imshow(_lowerCamelCase ) fig.axes.get_xaxis().set_visible(_lowerCamelCase ) fig.axes.get_yaxis().set_visible(_lowerCamelCase ) plt.show() def lowerCamelCase_( ) -> str: '''simple docstring''' _lowerCamelCase : Tuple = datetime.now() _lowerCamelCase : Tuple = current_time.strftime("%H:%M:%S" ) return timestamp
340
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class A_ ( _a ): lowerCAmelCase__ = 42 lowerCAmelCase__ = None def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=0.9_9_9 , _lowerCamelCase="cosine" , ) -> List[str]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(_lowerCamelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_lowerCamelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) _lowerCamelCase : str = [] for i in range(_lowerCamelCase ): _lowerCamelCase : Any = i / num_diffusion_timesteps _lowerCamelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) ) return torch.tensor(_lowerCamelCase , dtype=torch.floataa ) class A_ ( _a , _a ): @register_to_config def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,): '''simple docstring''' if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) _lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = 1.0 - self.betas _lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 ) _lowerCamelCase : int = torch.tensor(1.0 ) # standard deviation of the initial noise distribution _lowerCamelCase : Tuple = 1.0 # setable values _lowerCamelCase : List[Any] = None _lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() ) _lowerCamelCase : List[str] = variance_type def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ): '''simple docstring''' return sample def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ): '''simple docstring''' _lowerCamelCase : str = num_inference_steps _lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) _lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) _lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ): '''simple docstring''' if prev_timestep is None: _lowerCamelCase : List[str] = t - 1 _lowerCamelCase : Optional[int] = self.alphas_cumprod[t] _lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : List[Any] = self.betas[t] else: _lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from 
https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample _lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: _lowerCamelCase : List[str] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": _lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) ) _lowerCamelCase : str = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler _lowerCamelCase : str = variance.log() _lowerCamelCase : str = beta.log() _lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2 _lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,): '''simple docstring''' _lowerCamelCase : str = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": _lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 ) else: _lowerCamelCase : List[Any] = None # 1. compute alphas, betas if prev_timestep is None: _lowerCamelCase : List[Any] = t - 1 _lowerCamelCase : Dict = self.alphas_cumprod[t] _lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : List[str] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : Any = self.betas[t] _lowerCamelCase : str = self.alphas[t] else: _lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev _lowerCamelCase : Optional[Any] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": _lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": _lowerCamelCase : List[Any] = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: _lowerCamelCase : Any = torch.clamp( __lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t _lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _lowerCamelCase : Union[str, Any] = 0 if t > 0: _lowerCamelCase : Dict = randn_tensor( model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device ) _lowerCamelCase : Any = self._get_variance( __lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,) if self.variance_type == "fixed_small_log": _lowerCamelCase : Optional[Any] = variance elif self.variance_type == "learned_range": _lowerCamelCase : Optional[int] = (0.5 * variance).exp() else: raise ValueError( F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" " for the UnCLIPScheduler." ) _lowerCamelCase : Dict = variance * variance_noise _lowerCamelCase : List[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.IntTensor ,): '''simple docstring''' _lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype ) _lowerCamelCase : Any = timesteps.to(original_samples.device ) _lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5 _lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 _lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
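# A minimal denoising-loop sketch with diffusers' UnCLIPScheduler, the class
# the code above corresponds to; the model output is a random stand-in, not a
# real network:
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(25)

sample = torch.randn(1, 3, 64, 64)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # placeholder for a real UNet
    sample = scheduler.step(model_output, t, sample).prev_sample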
340
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer _lowerCAmelCase : Any = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast _lowerCAmelCase : int = TaTokenizerFast _lowerCAmelCase : List[Any] = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : List[Any] = [ '''MT5EncoderModel''', '''MT5ForConditionalGeneration''', '''MT5ForQuestionAnswering''', '''MT5Model''', '''MT5PreTrainedModel''', '''MT5Stack''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Optional[int] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Optional[int] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model'''] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys _lowerCAmelCase : List[str] = _LazyModule( __name__, globals()['''__file__'''], _import_structure, extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast}, module_spec=__spec__, )
340
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Dict = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : str = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' for attribute in key.split("." ): _lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: _lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: _lowerCamelCase : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowerCamelCase : Tuple = value elif weight_type == "weight_g": _lowerCamelCase : List[str] = value elif weight_type == "weight_v": _lowerCamelCase : List[Any] = value elif weight_type == "bias": _lowerCamelCase : str = value elif weight_type == "running_mean": _lowerCamelCase : Optional[int] = value elif weight_type == "running_var": _lowerCamelCase : Optional[Any] = value elif weight_type == "num_batches_tracked": _lowerCamelCase : int = value elif weight_type == "inv_freq": _lowerCamelCase : List[str] = value else: _lowerCamelCase : Optional[Any] = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Dict = [] _lowerCamelCase : Optional[Any] = fairseq_model.state_dict() _lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): _lowerCamelCase : Dict = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , ) _lowerCamelCase : List[Any] = True else: for key, mapped_key in MAPPING.items(): _lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _lowerCamelCase : int = True if "*" in mapped_key: _lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2] _lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase ) if "pos_bias_u" in name: _lowerCamelCase : int = None elif "pos_bias_v" in name: _lowerCamelCase : Any = None elif "weight_g" in name: _lowerCamelCase : Any = "weight_g" elif "weight_v" in name: _lowerCamelCase : Any = "weight_v" elif "bias" in name: _lowerCamelCase : Optional[Any] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj _lowerCamelCase : Dict = "weight" elif "running_mean" in name: _lowerCamelCase : str = "running_mean" elif "inv_freq" in name: _lowerCamelCase : List[Any] = "inv_freq" elif "running_var" in name: _lowerCamelCase : Tuple = "running_var" elif "num_batches_tracked" in name: _lowerCamelCase : str = "num_batches_tracked" else: _lowerCamelCase : Dict = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : int = full_name.split("conv_layers." )[-1] _lowerCamelCase : List[Any] = name.split("." 
) _lowerCamelCase : Union[str, Any] = int(items[0] ) _lowerCamelCase : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowerCamelCase : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowerCamelCase : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) _lowerCamelCase : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowerCamelCase : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCamelCase ) @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict: '''simple docstring''' if config_path is not None: _lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" ) else: _lowerCamelCase : Dict = WavaVecaConformerConfig() if "rope" in checkpoint_path: _lowerCamelCase : List[Any] = "rotary" if is_finetuned: if dict_path: _lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowerCamelCase : Optional[int] = target_dict.pad_index _lowerCamelCase : Dict = target_dict.bos_index _lowerCamelCase : Optional[Any] = target_dict.eos_index _lowerCamelCase : str = len(target_dict.symbols ) _lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" ) if not os.path.isdir(_lowerCamelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) _lowerCamelCase : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched _lowerCamelCase : List[str] = 0 _lowerCamelCase : List[Any] = 1 with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , ) _lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False _lowerCamelCase : 
Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) _lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) _lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase ) else: _lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase ) if is_finetuned: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" ) _lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase ) _lowerCamelCase : Dict = model[0].eval() recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned ) hf_wavavec.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) _lowerCAmelCase : str = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
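# Hedged example invocation for the wav2vec2-conformer conversion script above
# (the script file name and all paths are placeholders):
#
#   python convert_wav2vec2_conformer.py \
#       --checkpoint_path ./wav2vec2_conformer.pt \
#       --pytorch_dump_folder_path ./hf_model \
#       --not_finetuned
#
# The dumped folder then loads as a pretraining-style model:
from transformers import Wav2Vec2ConformerForPreTraining

model = Wav2Vec2ConformerForPreTraining.from_pretrained("./hf_model")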
340
1
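# A hedged smoke test for the conversion script in the record above, assuming it was
# run with --pytorch_dump_folder_path ./wav2vec2-conformer (the path is a placeholder,
# and the digit-stripped names in the row -- WavaVecaConformer... -- correspond to the
# real Wav2Vec2Conformer classes in transformers).
import torch
from transformers import Wav2Vec2ConformerForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("./wav2vec2-conformer")
model = Wav2Vec2ConformerForCTC.from_pretrained("./wav2vec2-conformer")

# One second of silence at the 16 kHz rate the feature extractor was saved with.
inputs = processor(torch.zeros(16_000).numpy(), sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(processor.batch_decode(logits.argmax(dim=-1)))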
"""simple docstring""" import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline _lowerCAmelCase : Optional[Any] = { '''n_samples''': 64, '''horizon''': 32, '''num_inference_steps''': 20, '''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network '''scale_grad_by_std''': True, '''scale''': 0.1, '''eta''': 0.0, '''t_grad_cutoff''': 2, '''device''': '''cpu''', } if __name__ == "__main__": _lowerCAmelCase : List[Any] = '''hopper-medium-v2''' _lowerCAmelCase : Optional[int] = gym.make(env_name) _lowerCAmelCase : Optional[int] = ValueGuidedRLPipeline.from_pretrained( '''bglick13/hopper-medium-v2-value-function-hor32''', env=env, ) env.seed(0) _lowerCAmelCase : Optional[int] = env.reset() _lowerCAmelCase : List[str] = 0 _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : Optional[Any] = 1000 _lowerCAmelCase : int = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy _lowerCAmelCase : List[Any] = pipeline(obs, planning_horizon=32) # execute action in environment _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = env.step(denorm_actions) _lowerCAmelCase : Any = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:''' f''' {total_score}''' ) # save observations for rendering rollout.append(next_observation.copy()) _lowerCAmelCase : str = next_observation except KeyboardInterrupt: pass print(f'''Total reward: {total_reward}''')
340
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(a - b ) for a, b in zip(_lowerCamelCase , _lowerCamelCase ) ) ) def lowerCamelCase_( _lowerCamelCase ) -> None: '''simple docstring''' if point: if isinstance(_lowerCamelCase , _lowerCamelCase ): for item in point: if not isinstance(_lowerCamelCase , (int, float) ): _lowerCamelCase : Dict = ( "Expected a list of numbers as input, found " F"""{type(_lowerCamelCase ).__name__}""" ) raise TypeError(_lowerCamelCase ) else: _lowerCamelCase : Optional[int] = F"""Expected a list of numbers as input, found {type(_lowerCamelCase ).__name__}""" raise TypeError(_lowerCamelCase ) else: raise ValueError("Missing an input" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(x - y ) for x, y in zip(_lowerCamelCase , _lowerCamelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
340
1
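# A minimal de-obfuscated sketch of the Manhattan-distance helper in the record above,
# using assumed original names (manhattan_distance); the throwaway identifiers in the
# dump cannot be called directly.
def manhattan_distance(point_a: list, point_b: list) -> float:
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))

print(manhattan_distance([1, 1], [9, 9]))  # 16.0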
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = { '''huggingface/time-series-transformer-tourism-monthly''': ( '''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json''' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class A_ ( _a ): lowerCAmelCase__ = 'time_series_transformer' lowerCAmelCase__ = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self: List[str] ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: str = "student_t" ,__lowerCAmelCase: str = "nll" ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: List[int] = [1, 2, 3, 4, 5, 6, 7] ,__lowerCAmelCase: Optional[Union[str, bool]] = "mean" ,__lowerCAmelCase: int = 0 ,__lowerCAmelCase: int = 0 ,__lowerCAmelCase: int = 0 ,__lowerCAmelCase: int = 0 ,__lowerCAmelCase: Optional[List[int]] = None ,__lowerCAmelCase: Optional[List[int]] = None ,__lowerCAmelCase: int = 32 ,__lowerCAmelCase: int = 32 ,__lowerCAmelCase: int = 2 ,__lowerCAmelCase: int = 2 ,__lowerCAmelCase: int = 2 ,__lowerCAmelCase: int = 2 ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: str = "gelu" ,__lowerCAmelCase: int = 64 ,__lowerCAmelCase: float = 0.1 ,__lowerCAmelCase: float = 0.1 ,__lowerCAmelCase: float = 0.1 ,__lowerCAmelCase: float = 0.1 ,__lowerCAmelCase: float = 0.1 ,__lowerCAmelCase: int = 100 ,__lowerCAmelCase: float = 0.02 ,__lowerCAmelCase: Union[str, Any]=True ,**__lowerCAmelCase: int ,): '''simple docstring''' _lowerCamelCase : Optional[Any] = prediction_length _lowerCamelCase : List[Any] = context_length or prediction_length _lowerCamelCase : Dict = distribution_output _lowerCamelCase : List[str] = loss _lowerCamelCase : Tuple = input_size _lowerCamelCase : Optional[Any] = num_time_features _lowerCamelCase : List[str] = lags_sequence _lowerCamelCase : List[Any] = scaling _lowerCamelCase : List[str] = num_dynamic_real_features _lowerCamelCase : Optional[Any] = num_static_real_features _lowerCamelCase : Optional[int] = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(__lowerCAmelCase ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) _lowerCamelCase : str = cardinality else: _lowerCamelCase : Optional[int] = [0] if embedding_dimension and num_static_categorical_features > 0: if len(__lowerCAmelCase ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) _lowerCamelCase : List[str] = embedding_dimension else: _lowerCamelCase : str = [min(50 ,(cat + 1) // 2 ) for cat in self.cardinality] _lowerCamelCase : List[Any] = num_parallel_samples # Transformer architecture configuration _lowerCamelCase : Dict = input_size * len(__lowerCAmelCase ) + self._number_of_features _lowerCamelCase : Optional[int] = d_model _lowerCamelCase : List[Any] = encoder_attention_heads _lowerCamelCase : Union[str, Any] = decoder_attention_heads _lowerCamelCase : Optional[Any] = encoder_ffn_dim _lowerCamelCase : Optional[Any] = decoder_ffn_dim _lowerCamelCase : List[str] = encoder_layers _lowerCamelCase : List[str] = decoder_layers 
_lowerCamelCase : Union[str, Any] = dropout _lowerCamelCase : Optional[Any] = attention_dropout _lowerCamelCase : Tuple = activation_dropout _lowerCamelCase : Union[str, Any] = encoder_layerdrop _lowerCamelCase : str = decoder_layerdrop _lowerCamelCase : List[str] = activation_function _lowerCamelCase : Optional[Any] = init_std _lowerCamelCase : Tuple = use_cache super().__init__(is_encoder_decoder=__lowerCAmelCase ,**__lowerCAmelCase ) @property def _lowercase ( self: List[Any] ): '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
340
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Optional[Any] = np.inf def set_batch_size(_lowerCamelCase ) -> None: nonlocal batch_size if isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Optional[int] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ) and feature.dtype == "binary": _lowerCamelCase : List[str] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_lowerCamelCase , _lowerCamelCase ) return None if batch_size is np.inf else batch_size class A_ ( _a ): def __init__( self: Optional[int] ,__lowerCAmelCase: NestedDataStructureLike[PathLike] ,__lowerCAmelCase: Optional[NamedSplit] = None ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: int ,): '''simple docstring''' super().__init__( __lowerCAmelCase ,split=__lowerCAmelCase ,features=__lowerCAmelCase ,cache_dir=__lowerCAmelCase ,keep_in_memory=__lowerCAmelCase ,streaming=__lowerCAmelCase ,num_proc=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : Tuple = path_or_paths if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else {self.split: path_or_paths} _lowerCamelCase : Any = _PACKAGED_DATASETS_MODULES["parquet"][1] _lowerCamelCase : int = Parquet( cache_dir=__lowerCAmelCase ,data_files=__lowerCAmelCase ,features=__lowerCAmelCase ,hash=__lowerCAmelCase ,**__lowerCAmelCase ,) def _lowercase ( self: Optional[int] ): '''simple docstring''' if self.streaming: _lowerCamelCase : List[Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _lowerCamelCase : Tuple = None _lowerCamelCase : Optional[int] = None _lowerCamelCase : List[str] = None _lowerCamelCase : str = None self.builder.download_and_prepare( download_config=__lowerCAmelCase ,download_mode=__lowerCAmelCase ,verification_mode=__lowerCAmelCase ,base_path=__lowerCAmelCase ,num_proc=self.num_proc ,) _lowerCamelCase : Any = self.builder.as_dataset( split=self.split ,verification_mode=__lowerCAmelCase ,in_memory=self.keep_in_memory ) return dataset class A_ : def __init__( self: str ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: Union[PathLike, BinaryIO] ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: List[Any] ,): '''simple docstring''' _lowerCamelCase : Any = dataset _lowerCamelCase : Any = path_or_buf _lowerCamelCase : Any = batch_size or get_writer_batch_size(dataset.features ) _lowerCamelCase : List[str] = parquet_writer_kwargs def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf ,(str, 
bytes, os.PathLike) ): with open(self.path_or_buf ,"wb+" ) as buffer: _lowerCamelCase : str = self._write(file_obj=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) else: _lowerCamelCase : Optional[int] = self._write(file_obj=self.path_or_buf ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) return written def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: BinaryIO ,__lowerCAmelCase: int ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : List[str] = 0 _lowerCamelCase : Optional[int] = parquet_writer_kwargs.pop("path_or_buf" ,__lowerCAmelCase ) _lowerCamelCase : List[str] = self.dataset.features.arrow_schema _lowerCamelCase : str = pq.ParquetWriter(__lowerCAmelCase ,schema=__lowerCAmelCase ,**__lowerCAmelCase ) for offset in logging.tqdm( range(0 ,len(self.dataset ) ,__lowerCAmelCase ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating parquet from Arrow format" ,): _lowerCamelCase : List[str] = query_table( table=self.dataset._data ,key=slice(__lowerCAmelCase ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,) writer.write_table(__lowerCAmelCase ) written += batch.nbytes writer.close() return written
340
1
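# A small usage sketch for the Parquet reader/writer wrapped in the record above, going
# through the public datasets API (Dataset.to_parquet routes through the same writer
# class; a recent datasets version is assumed).
from datasets import Dataset

ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
ds.to_parquet("tiny.parquet")  # batch size falls back to get_writer_batch_size / config default
print(Dataset.from_parquet("tiny.parquet")[0])  # {'text': 'hello', 'label': 0}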
"""simple docstring""" import math def lowerCamelCase_( _lowerCamelCase ) -> bool: '''simple docstring''' return math.sqrt(_lowerCamelCase ) * math.sqrt(_lowerCamelCase ) == num def lowerCamelCase_( _lowerCamelCase ) -> bool: '''simple docstring''' _lowerCamelCase : str = 0 _lowerCamelCase : List[str] = n while left <= right: _lowerCamelCase : Optional[int] = (left + right) // 2 if mid**2 == n: return True elif mid**2 > n: _lowerCamelCase : Tuple = mid - 1 else: _lowerCamelCase : Optional[Any] = mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
340
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Optional[int] = {} _lowerCamelCase : Optional[int] = tokenizer(example["content"] , truncation=_lowerCamelCase )["input_ids"] _lowerCamelCase : Dict = len(example["content"] ) / len(output["input_ids"] ) return output _lowerCAmelCase : Tuple = HfArgumentParser(PretokenizationArguments) _lowerCAmelCase : Optional[int] = parser.parse_args() if args.num_workers is None: _lowerCAmelCase : Any = multiprocessing.cpu_count() _lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir) _lowerCAmelCase : Union[str, Any] = time.time() _lowerCAmelCase : Optional[int] = load_dataset(args.dataset_name, split='''train''') print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') _lowerCAmelCase : Any = time.time() _lowerCAmelCase : Dict = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ '''repo_name''', '''path''', '''copies''', '''size''', '''content''', '''license''', '''hash''', '''line_mean''', '''line_max''', '''alpha_frac''', '''autogenerated''', ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') _lowerCAmelCase : str = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
340
1
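# A runnable reconstruction of the two perfect-square checks in the record above, with
# assumed original names; math.isqrt avoids the float-precision trap of comparing
# math.sqrt(x) * math.sqrt(x) to x for large integers.
import math

def perfect_square(num: int) -> bool:
    return math.isqrt(num) ** 2 == num

def perfect_square_binary_search(n: int) -> bool:
    left, right = 0, n
    while left <= right:
        mid = (left + right) // 2
        if mid * mid == n:
            return True
        if mid * mid > n:
            right = mid - 1
        else:
            left = mid + 1
    return False

assert perfect_square_binary_search(16) and not perfect_square_binary_search(15)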
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer _lowerCAmelCase : int = logging.get_logger(__name__) _lowerCAmelCase : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} _lowerCAmelCase : Union[str, Any] = { '''vocab_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''', '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-german-cased''': ( '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json''' ), '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json''' ), }, } _lowerCAmelCase : int = { '''distilbert-base-uncased''': 512, '''distilbert-base-uncased-distilled-squad''': 512, '''distilbert-base-cased''': 512, '''distilbert-base-cased-distilled-squad''': 512, '''distilbert-base-german-cased''': 512, '''distilbert-base-multilingual-cased''': 512, } _lowerCAmelCase : str = { '''distilbert-base-uncased''': {'''do_lower_case''': True}, '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True}, '''distilbert-base-cased''': {'''do_lower_case''': False}, '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False}, '''distilbert-base-german-cased''': {'''do_lower_case''': False}, '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False}, } class A_ ( _a ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION lowerCAmelCase__ = ['input_ids', 'attention_mask'] lowerCAmelCase__ = DistilBertTokenizer def __init__( self: List[str] ,__lowerCAmelCase: Dict=None ,__lowerCAmelCase: Dict=None ,__lowerCAmelCase: Optional[int]=True ,__lowerCAmelCase: Dict="[UNK]" ,__lowerCAmelCase: List[Any]="[SEP]" ,__lowerCAmelCase: List[str]="[PAD]" ,__lowerCAmelCase: Union[str, Any]="[CLS]" ,__lowerCAmelCase: Optional[int]="[MASK]" ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: List[Any]=None ,**__lowerCAmelCase: List[str] ,): '''simple docstring''' super().__init__( __lowerCAmelCase 
,tokenizer_file=__lowerCAmelCase ,do_lower_case=__lowerCAmelCase ,unk_token=__lowerCAmelCase ,sep_token=__lowerCAmelCase ,pad_token=__lowerCAmelCase ,cls_token=__lowerCAmelCase ,mask_token=__lowerCAmelCase ,tokenize_chinese_chars=__lowerCAmelCase ,strip_accents=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" ,__lowerCAmelCase ) != do_lower_case or normalizer_state.get("strip_accents" ,__lowerCAmelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" ,__lowerCAmelCase ) != tokenize_chinese_chars ): _lowerCamelCase : int = getattr(__lowerCAmelCase ,normalizer_state.pop("type" ) ) _lowerCamelCase : Tuple = do_lower_case _lowerCamelCase : Optional[Any] = strip_accents _lowerCamelCase : Union[str, Any] = tokenize_chinese_chars _lowerCamelCase : Optional[Any] = normalizer_class(**__lowerCAmelCase ) _lowerCamelCase : Any = do_lower_case def _lowercase ( self: List[str] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int=None ): '''simple docstring''' _lowerCamelCase : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowercase ( self: Tuple ,__lowerCAmelCase: List[int] ,__lowerCAmelCase: Optional[List[int]] = None ): '''simple docstring''' _lowerCamelCase : str = [self.sep_token_id] _lowerCamelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[str] = None ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self._tokenizer.model.save(__lowerCAmelCase ,name=__lowerCAmelCase ) return tuple(__lowerCAmelCase )
340
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Optional[Any] = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
1
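# A hedged example of the two helper methods defined on the fast tokenizer in the
# record above, assuming network access to the distilbert-base-uncased checkpoint.
from transformers import DistilBertTokenizerFast

tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
ids_a = tok.convert_tokens_to_ids(["hello"])
print(tok.convert_ids_to_tokens(tok.build_inputs_with_special_tokens(ids_a)))
# ['[CLS]', 'hello', '[SEP]']
print(tok.create_token_type_ids_from_sequences(ids_a, tok.convert_tokens_to_ids(["world"])))
# [0, 0, 0, 1, 1]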
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class A_ ( _a ): lowerCAmelCase__ = 'EncodecFeatureExtractor' lowerCAmelCase__ = ('T5Tokenizer', 'T5TokenizerFast') def __init__( self: Optional[Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: str ): '''simple docstring''' super().__init__(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = self.feature_extractor _lowerCamelCase : List[Any] = False def _lowercase ( self: Tuple ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[int]=True ): '''simple docstring''' return self.tokenizer.get_decoder_prompt_ids(task=__lowerCAmelCase ,language=__lowerCAmelCase ,no_timestamps=__lowerCAmelCase ) def __call__( self: Optional[int] ,*__lowerCAmelCase: Dict ,**__lowerCAmelCase: Any ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = kwargs.pop("audio" ,__lowerCAmelCase ) _lowerCamelCase : List[Any] = kwargs.pop("sampling_rate" ,__lowerCAmelCase ) _lowerCamelCase : List[str] = kwargs.pop("text" ,__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: _lowerCamelCase : List[str] = args[0] _lowerCamelCase : Dict = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if text is not None: _lowerCamelCase : List[Any] = self.tokenizer(__lowerCAmelCase ,**__lowerCAmelCase ) if audio is not None: _lowerCamelCase : Dict = self.feature_extractor(__lowerCAmelCase ,*__lowerCAmelCase ,sampling_rate=__lowerCAmelCase ,**__lowerCAmelCase ) if audio is None: return inputs elif text is None: return audio_inputs else: _lowerCamelCase : Optional[Any] = audio_inputs["input_values"] if "padding_mask" in audio_inputs: _lowerCamelCase : int = audio_inputs["padding_mask"] return inputs def _lowercase ( self: List[str] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: int ): '''simple docstring''' _lowerCamelCase : int = kwargs.pop("audio" ,__lowerCAmelCase ) _lowerCamelCase : str = kwargs.pop("padding_mask" ,__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: _lowerCamelCase : Tuple = args[0] _lowerCamelCase : Dict = args[1:] if audio_values is not None: return self._decode_audio(__lowerCAmelCase ,padding_mask=__lowerCAmelCase ) else: return self.tokenizer.batch_decode(*__lowerCAmelCase ,**__lowerCAmelCase ) def _lowercase ( self: Optional[int] ,*__lowerCAmelCase: int ,**__lowerCAmelCase: str ): '''simple docstring''' return self.tokenizer.decode(*__lowerCAmelCase ,**__lowerCAmelCase ) def _lowercase ( self: Tuple ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional = None ): '''simple docstring''' _lowerCamelCase : Tuple = to_numpy(__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = audio_values.shape if padding_mask is None: return list(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = to_numpy(__lowerCAmelCase ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) _lowerCamelCase : Union[str, Any] = seq_len - padding_mask.shape[-1] _lowerCamelCase : Any = 1 - self.feature_extractor.padding_value _lowerCamelCase : List[Any] = np.pad(__lowerCAmelCase ,((0, 0), (0, difference)) ,"constant" ,constant_values=__lowerCAmelCase ) _lowerCamelCase : 
Optional[int] = audio_values.tolist() for i in range(__lowerCAmelCase ): _lowerCamelCase : Tuple = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] _lowerCamelCase : Union[str, Any] = sliced_audio.reshape(__lowerCAmelCase ,-1 ) return audio_values
340
"""simple docstring""" import logging from transformers.configuration_utils import PretrainedConfig _lowerCAmelCase : Optional[Any] = logging.getLogger(__name__) class A_ ( _a ): lowerCAmelCase__ = 'masked_bert' def __init__( self: Union[str, Any] ,__lowerCAmelCase: Dict=30_522 ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Dict=12 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: List[Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: str=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-12 ,__lowerCAmelCase: Union[str, Any]=0 ,__lowerCAmelCase: List[Any]="topK" ,__lowerCAmelCase: Optional[Any]="constant" ,__lowerCAmelCase: Optional[Any]=0.0 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = hidden_size _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : Tuple = num_attention_heads _lowerCamelCase : Optional[Any] = hidden_act _lowerCamelCase : Optional[Any] = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : str = max_position_embeddings _lowerCamelCase : List[str] = type_vocab_size _lowerCamelCase : Optional[int] = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : int = pruning_method _lowerCamelCase : str = mask_init _lowerCamelCase : List[Any] = mask_scale
340
1
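# A standalone NumPy sketch of the _decode_audio trimming in the record above: the
# padding mask is extended with the *non*-padding token so generated samples survive,
# then each batch item keeps only unpadded samples (padding_value = 0.0 assumed, as in
# the feature-extractor defaults).
import numpy as np

def decode_audio(audio_values: np.ndarray, padding_mask: np.ndarray, padding_value: float = 0.0):
    bsz, channels, seq_len = audio_values.shape
    difference = seq_len - padding_mask.shape[-1]
    padding_mask = np.pad(
        padding_mask, ((0, 0), (0, difference)), "constant", constant_values=1 - padding_value
    )
    return [
        audio_values[i][padding_mask[i][None, :] != padding_value].reshape(channels, -1)
        for i in range(bsz)
    ]

audio = np.arange(6, dtype=float).reshape(1, 1, 6)   # 1 item, 1 channel, 6 samples
mask = np.array([[1.0, 1.0, 1.0, 0.0]])              # fourth original sample is padding
print(decode_audio(audio, mask)[0])                  # [[0. 1. 2. 4. 5.]]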
"""simple docstring""" from random import shuffle import tensorflow as tf from numpy import array def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : Union[str, Any] = int(_lowerCamelCase ) assert noofclusters < len(_lowerCamelCase ) # Find out the dimensionality _lowerCamelCase : Tuple = len(vectors[0] ) # Will help select random centroids from among the available vectors _lowerCamelCase : str = list(range(len(_lowerCamelCase ) ) ) shuffle(_lowerCamelCase ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. _lowerCamelCase : List[Any] = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION _lowerCamelCase : Any = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points _lowerCamelCase : List[Any] = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase ) ] ##These nodes will assign the centroid Variables the appropriate ##values _lowerCamelCase : int = tf.placeholder("float64" , [dim] ) _lowerCamelCase : List[str] = [] for centroid in centroids: cent_assigns.append(tf.assign(_lowerCamelCase , _lowerCamelCase ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) _lowerCamelCase : Optional[int] = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )] ##These nodes will assign an assignment Variable the appropriate ##value _lowerCamelCase : Optional[Any] = tf.placeholder("int32" ) _lowerCamelCase : Tuple = [] for assignment in assignments: cluster_assigns.append(tf.assign(_lowerCamelCase , _lowerCamelCase ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input _lowerCamelCase : Union[str, Any] = tf.placeholder("float" , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors _lowerCamelCase : List[Any] = tf.reduce_mean(_lowerCamelCase , 0 ) ##Node for computing Euclidean distances # Placeholders for input _lowerCamelCase : Union[str, Any] = tf.placeholder("float" , [dim] ) _lowerCamelCase : List[Any] = tf.placeholder("float" , [dim] ) _lowerCamelCase : Dict = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCamelCase , _lowerCamelCase ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input _lowerCamelCase : str = tf.placeholder("float" , [noofclusters] ) _lowerCamelCase : List[str] = tf.argmin(_lowerCamelCase , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. _lowerCamelCase : str = tf.initialize_all_variables() # Initialize all variables sess.run(_lowerCamelCase ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. 
_lowerCamelCase : Optional[Any] = 100 for _ in range(_lowerCamelCase ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(_lowerCamelCase ) ): _lowerCamelCase : List[Any] = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. _lowerCamelCase : List[str] = [ sess.run(_lowerCamelCase , feed_dict={va: vect, va: sess.run(_lowerCamelCase )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input _lowerCamelCase : List[str] = sess.run( _lowerCamelCase , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(_lowerCamelCase ): # Collect all the vectors assigned to this cluster _lowerCamelCase : Optional[int] = [ vectors[i] for i in range(len(_lowerCamelCase ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location _lowerCamelCase : Union[str, Any] = sess.run( _lowerCamelCase , feed_dict={mean_input: array(_lowerCamelCase )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments _lowerCamelCase : Any = sess.run(_lowerCamelCase ) _lowerCamelCase : Optional[int] = sess.run(_lowerCamelCase ) return centroids, assignments
340
"""simple docstring""" import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model _lowerCAmelCase : str = '''0.12''' # assumed parallelism: 8 if is_torch_available(): import torch def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) -> List[Any]: '''simple docstring''' if rng is None: _lowerCamelCase : Union[str, Any] = random.Random() _lowerCamelCase : Union[str, Any] = 1 for dim in shape: total_dims *= dim _lowerCamelCase : Optional[int] = [] for _ in range(_lowerCamelCase ): values.append(rng.randint(0 , vocab_size - 1 ) ) _lowerCamelCase : Union[str, Any] = np.array(_lowerCamelCase , dtype=jnp.intaa ).reshape(_lowerCamelCase ) return output def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Optional[int] = ids_tensor(_lowerCamelCase , vocab_size=2 , rng=_lowerCamelCase ) # make sure that at least one token is attended to for each batch _lowerCamelCase : List[str] = 1 return attn_mask @require_flax class A_ : lowerCAmelCase__ = None lowerCAmelCase__ = () def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 _lowerCamelCase : List[str] = 2 _lowerCamelCase : str = inputs["input_ids"].shape[-1] // 2 _lowerCamelCase : Tuple = inputs["input_ids"][:max_batch_size, :sequence_length] _lowerCamelCase : Any = jnp.ones_like(__lowerCAmelCase ) _lowerCamelCase : List[Any] = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens _lowerCamelCase : Optional[Any] = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` _lowerCamelCase : List[str] = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = False _lowerCamelCase : Dict = max_length _lowerCamelCase : Tuple = 0 for model_class in self.all_generative_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning _lowerCamelCase : Any = getattr(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Dict = pt_model_class(__lowerCAmelCase ).eval() _lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCAmelCase ,flax_model.params ) _lowerCamelCase : int = flax_model.generate(__lowerCAmelCase ).sequences _lowerCamelCase : Optional[int] = pt_model.generate(torch.tensor(__lowerCAmelCase ,dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: _lowerCamelCase : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, 
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = self._get_input_ids_and_config() _lowerCamelCase : Union[str, Any] = False _lowerCamelCase : Union[str, Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = True _lowerCamelCase : Optional[int] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : int = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : int = False _lowerCamelCase : Optional[Any] = max_length _lowerCamelCase : Dict = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[str] = model_class(__lowerCAmelCase ) _lowerCamelCase : Dict = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = self._get_input_ids_and_config() _lowerCamelCase : Tuple = False _lowerCamelCase : Union[str, Any] = max_length _lowerCamelCase : List[str] = 2 _lowerCamelCase : Optional[int] = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : str = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() _lowerCamelCase : int = True _lowerCamelCase : List[Any] = max_length _lowerCamelCase : Optional[Any] = 0.8 _lowerCamelCase : Union[str, Any] = 10 _lowerCamelCase : List[str] = 0.3 _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : str = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : int = jit(model.generate ) _lowerCamelCase : Optional[int] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() 
,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[str] = max_length _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : Dict = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Any = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : Any = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() _lowerCamelCase : Dict = max_length _lowerCamelCase : List[Any] = 2 _lowerCamelCase : Tuple = 1 _lowerCamelCase : List[str] = 8 _lowerCamelCase : List[Any] = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : int = model_class(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : Optional[Any] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Tuple = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : Dict = False _lowerCamelCase : Any = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[Any] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : List[str] = 
attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : int = 2 _lowerCamelCase : int = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : Dict = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) @require_flax class A_ ( unittest.TestCase ): def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" ) _lowerCamelCase : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) _lowerCamelCase : Optional[Any] = "Hello world" _lowerCamelCase : str = tokenizer(__lowerCAmelCase ,return_tensors="np" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__lowerCAmelCase ,"do_samples" ): model.generate(__lowerCAmelCase ,do_samples=__lowerCAmelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__lowerCAmelCase ,"foo" ): _lowerCamelCase : List[str] = {"foo": "bar"} model.generate(__lowerCAmelCase ,**__lowerCAmelCase )
340
1
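# A compact NumPy rendering of the Lloyd/E-M iterations that the record above builds
# out of TF1 ops (tf.sub and tf.initialize_all_variables are TF1-only APIs); the names
# and the fixed iteration count are assumptions carried over from that code.
import numpy as np

def kmeans(vectors: np.ndarray, noofclusters: int, iterations: int = 100, seed: int = 0):
    rng = np.random.default_rng(seed)
    centroids = vectors[rng.choice(len(vectors), size=noofclusters, replace=False)].copy()
    assignments = np.zeros(len(vectors), dtype=int)
    for _ in range(iterations):
        # Expectation step: assign every vector to its nearest centroid.
        distances = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = distances.argmin(axis=1)
        # Maximization step: move each centroid to the mean of its members.
        for c in range(noofclusters):
            members = vectors[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments

points = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.0], [5.2, 4.9]])
print(kmeans(points, noofclusters=2))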
"""simple docstring""" import collections import importlib.util import os import re from pathlib import Path _lowerCAmelCase : Tuple = '''src/transformers''' # Matches is_xxx_available() _lowerCAmelCase : str = re.compile(R'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} _lowerCAmelCase : Tuple = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] _lowerCAmelCase : Union[str, Any] = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available _lowerCAmelCase : Optional[int] = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") _lowerCAmelCase : Any = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] _lowerCAmelCase : Any = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", _lowerCAmelCase : List[str] = re.compile('''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], _lowerCAmelCase : List[str] = re.compile('''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo _lowerCAmelCase : Union[str, Any] = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: _lowerCAmelCase : List[Any] = re.compile(R'''^\s*try:''') # Catches a line with else: _lowerCAmelCase : int = re.compile(R'''^\s*else:''') def lowerCamelCase_( _lowerCamelCase ) -> Dict: '''simple docstring''' if _re_test_backend.search(_lowerCamelCase ) is None: return None _lowerCamelCase : Dict = [b[0] for b in _re_backend.findall(_lowerCamelCase )] backends.sort() return "_and_".join(_lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase ) -> List[Any]: '''simple docstring''' with open(_lowerCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f: _lowerCamelCase : str = f.readlines() _lowerCamelCase : int = 0 while line_index < len(_lowerCamelCase ) and not lines[line_index].startswith("_import_structure = {" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_lowerCamelCase ): return None # First grab the objects without a specific backend in _import_structure _lowerCamelCase : List[Any] = [] while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None: _lowerCamelCase : str = lines[line_index] # If we have everything on a single line, let's deal with it. 
if _re_one_line_import_struct.search(_lowerCamelCase ): _lowerCamelCase : List[Any] = _re_one_line_import_struct.search(_lowerCamelCase ).groups()[0] _lowerCamelCase : Optional[Any] = re.findall("\[([^\]]+)\]" , _lowerCamelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(", " )] ) line_index += 1 continue _lowerCamelCase : Optional[int] = _re_import_struct_key_value.search(_lowerCamelCase ) if single_line_import_search is not None: _lowerCamelCase : List[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(_lowerCamelCase ) > 0] objects.extend(_lowerCamelCase ) elif line.startswith(" " * 8 + "\"" ): objects.append(line[9:-3] ) line_index += 1 _lowerCamelCase : Any = {"none": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("if TYPE_CHECKING" ): # If the line is an if not is_backend_available, we grab all objects associated. _lowerCamelCase : Tuple = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _lowerCamelCase : Any = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _lowerCamelCase : List[str] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ): _lowerCamelCase : Dict = lines[line_index] if _re_import_struct_add_one.search(_lowerCamelCase ) is not None: objects.append(_re_import_struct_add_one.search(_lowerCamelCase ).groups()[0] ) elif _re_import_struct_add_many.search(_lowerCamelCase ) is not None: _lowerCamelCase : Optional[int] = _re_import_struct_add_many.search(_lowerCamelCase ).groups()[0].split(", " ) _lowerCamelCase : List[Any] = [obj[1:-1] for obj in imports if len(_lowerCamelCase ) > 0] objects.extend(_lowerCamelCase ) elif _re_between_brackets.search(_lowerCamelCase ) is not None: _lowerCamelCase : List[str] = _re_between_brackets.search(_lowerCamelCase ).groups()[0].split(", " ) _lowerCamelCase : List[Any] = [obj[1:-1] for obj in imports if len(_lowerCamelCase ) > 0] objects.extend(_lowerCamelCase ) elif _re_quote_object.search(_lowerCamelCase ) is not None: objects.append(_re_quote_object.search(_lowerCamelCase ).groups()[0] ) elif line.startswith(" " * 8 + "\"" ): objects.append(line[9:-3] ) elif line.startswith(" " * 12 + "\"" ): objects.append(line[13:-3] ) line_index += 1 _lowerCamelCase : Any = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _lowerCamelCase : str = [] while ( line_index < len(_lowerCamelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("else" ) ): _lowerCamelCase : Tuple = lines[line_index] _lowerCamelCase : Tuple = _re_import.search(_lowerCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 8 ): objects.append(line[8:-2] ) line_index += 1 _lowerCamelCase : Optional[int] = {"none": objects} # Let's continue with backend-specific objects while line_index < len(_lowerCamelCase ): # If the line is an if is_backend_available, we grab all objects associated. 
_lowerCamelCase : List[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _lowerCamelCase : Optional[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _lowerCamelCase : List[str] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ): _lowerCamelCase : Optional[Any] = lines[line_index] _lowerCamelCase : List[str] = _re_import.search(_lowerCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 12 ): objects.append(line[12:-2] ) line_index += 1 _lowerCamelCase : int = objects else: line_index += 1 return import_dict_objects, type_hint_objects def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Tuple: '''simple docstring''' def find_duplicates(_lowerCamelCase ): return [k for k, v in collections.Counter(_lowerCamelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _lowerCamelCase : Dict = [] for key in import_dict_objects.keys(): _lowerCamelCase : Union[str, Any] = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) _lowerCamelCase : List[str] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _lowerCamelCase : int = "base imports" if key == "none" else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def lowerCamelCase_( ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Tuple = [] for root, _, files in os.walk(_lowerCamelCase ): if "__init__.py" in files: _lowerCamelCase : List[Any] = os.path.join(_lowerCamelCase , "__init__.py" ) _lowerCamelCase : List[Any] = parse_init(_lowerCamelCase ) if objects is not None: _lowerCamelCase : Union[str, Any] = analyze_results(*_lowerCamelCase ) if len(_lowerCamelCase ) > 0: _lowerCamelCase : str = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append("\n".join(_lowerCamelCase ) ) if len(_lowerCamelCase ) > 0: raise ValueError("\n\n".join(_lowerCamelCase ) ) def lowerCamelCase_( ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Dict = [] for path, directories, files in os.walk(_lowerCamelCase ): for folder in directories: # Ignore private modules if folder.startswith("_" ): directories.remove(_lowerCamelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_lowerCamelCase ) / folder).glob("*.py" ) ) ) == 0: continue _lowerCamelCase : Optional[Any] = str((Path(_lowerCamelCase ) / folder).relative_to(_lowerCamelCase ) ) _lowerCamelCase : List[Any] = short_path.replace(os.path.sep , "." 
) submodules.append(_lowerCamelCase ) for fname in files: if fname == "__init__.py": continue _lowerCamelCase : Any = str((Path(_lowerCamelCase ) / fname).relative_to(_lowerCamelCase ) ) _lowerCamelCase : Tuple = short_path.replace(".py" , "" ).replace(os.path.sep , "." ) if len(submodule.split("." ) ) == 1: submodules.append(_lowerCamelCase ) return submodules _lowerCAmelCase : Optional[int] = [ '''convert_pytorch_checkpoint_to_tf2''', '''modeling_flax_pytorch_utils''', ] def lowerCamelCase_( ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Tuple = importlib.util.spec_from_file_location( "transformers" , os.path.join(_lowerCamelCase , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) _lowerCamelCase : List[str] = spec.loader.load_module() _lowerCamelCase : Tuple = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(_lowerCamelCase ) > 0: _lowerCamelCase : List[str] = "\n".join(F"""- {module}""" for module in module_not_registered ) raise ValueError( "The following submodules are not properly registered in the main init of Transformers:\n" F"""{list_of_modules}\n""" "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." ) if __name__ == "__main__": check_all_inits() check_submodules()
340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : int = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class A_ ( _a ): lowerCAmelCase__ = 'mobilenet_v1' def __init__( self: Tuple ,__lowerCAmelCase: int=3 ,__lowerCAmelCase: Dict=224 ,__lowerCAmelCase: int=1.0 ,__lowerCAmelCase: Tuple=8 ,__lowerCAmelCase: List[str]="relu6" ,__lowerCAmelCase: int=True ,__lowerCAmelCase: List[Any]=0.9_99 ,__lowerCAmelCase: Optional[int]=0.02 ,__lowerCAmelCase: Optional[int]=0.0_01 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) _lowerCamelCase : List[str] = num_channels _lowerCamelCase : Union[str, Any] = image_size _lowerCamelCase : List[Any] = depth_multiplier _lowerCamelCase : Any = min_depth _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Dict = tf_padding _lowerCamelCase : Union[str, Any] = classifier_dropout_prob _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps class A_ ( _a ): lowerCAmelCase__ = version.parse('1.11' ) @property def _lowercase ( self: Optional[int] ): '''simple docstring''' return OrderedDict([("pixel_values", {0: "batch"})] ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def _lowercase ( self: Any ): '''simple docstring''' return 1e-4
340
1
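# A toy run of the backend-extraction idea from the init checker in the record above.
# Note the trailing () in the pattern is itself a capture group, so findall returns
# tuples and b[0] picks out the backend name -- the same trick the checker relies on.
import re

_re_backend = re.compile(r"is\_([a-z_]*)_available()")
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")

line = "    if not is_torch_available() and not is_vision_available():"
if _re_test_backend.search(line) is not None:
    backends = sorted(b[0] for b in _re_backend.findall(line))
    print("_and_".join(backends))  # torch_and_vision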
"""simple docstring""" import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor _lowerCAmelCase : Any = logging.get_logger(__name__) class A_ ( _a ): def __init__( self: List[Any] ,*__lowerCAmelCase: Dict ,**__lowerCAmelCase: List[Any] ): '''simple docstring''' warnings.warn( "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use PoolFormerImageProcessor instead." ,__lowerCAmelCase ,) super().__init__(*__lowerCAmelCase ,**__lowerCAmelCase )
340
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def lowerCamelCase_( _lowerCamelCase ) -> Any: '''simple docstring''' for param in module.parameters(): _lowerCamelCase : Optional[int] = False def lowerCamelCase_( ) -> List[str]: '''simple docstring''' _lowerCamelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _lowerCamelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Dict = plt.imshow(_lowerCamelCase ) fig.axes.get_xaxis().set_visible(_lowerCamelCase ) fig.axes.get_yaxis().set_visible(_lowerCamelCase ) plt.show() def lowerCamelCase_( ) -> str: '''simple docstring''' _lowerCamelCase : Tuple = datetime.now() _lowerCamelCase : Tuple = current_time.strftime("%H:%M:%S" ) return timestamp
340
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Optional[int] = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Optional[int] = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys _lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = ( first_str_length if first_str_length > second_str_length else second_str_length ) _lowerCamelCase : list = [] for char_count in range(_lowerCamelCase ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(_lowerCamelCase ) if __name__ == "__main__": print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
340
1
"""simple docstring""" import logging from transformers.configuration_utils import PretrainedConfig _lowerCAmelCase : Optional[Any] = logging.getLogger(__name__) class A_ ( _a ): lowerCAmelCase__ = 'masked_bert' def __init__( self: Union[str, Any] ,__lowerCAmelCase: Dict=30_522 ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Dict=12 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: List[Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: str=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-12 ,__lowerCAmelCase: Union[str, Any]=0 ,__lowerCAmelCase: List[Any]="topK" ,__lowerCAmelCase: Optional[Any]="constant" ,__lowerCAmelCase: Optional[Any]=0.0 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = hidden_size _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : Tuple = num_attention_heads _lowerCamelCase : Optional[Any] = hidden_act _lowerCamelCase : Optional[Any] = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : str = max_position_embeddings _lowerCamelCase : List[str] = type_vocab_size _lowerCamelCase : Optional[int] = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : int = pruning_method _lowerCamelCase : str = mask_init _lowerCamelCase : List[Any] = mask_scale
340
"""simple docstring""" _lowerCAmelCase : Tuple = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Any = [False] * len(_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = [s] _lowerCamelCase : str = True while queue: _lowerCamelCase : Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_lowerCamelCase ) _lowerCamelCase : Any = True _lowerCamelCase : Any = u return visited[t] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : List[str] = [-1] * (len(_lowerCamelCase )) _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : List[str] = [i[:] for i in graph] # Record original cut, copy. while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Any = float("Inf" ) _lowerCamelCase : Dict = sink while s != source: # Find the minimum value in select path _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , graph[parent[s]][s] ) _lowerCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _lowerCamelCase : Optional[Any] = sink while v != source: _lowerCamelCase : Union[str, Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _lowerCamelCase : List[str] = parent[v] for i in range(len(_lowerCamelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
340
1
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' if isinstance(_lowerCamelCase , _lowerCamelCase ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(_lowerCamelCase , _lowerCamelCase ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" _lowerCamelCase : int = False if num < 0: _lowerCamelCase : List[Any] = True _lowerCamelCase : Optional[Any] = -num _lowerCamelCase : list[int] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(_lowerCamelCase ) for e in binary ) return "0b" + "".join(str(_lowerCamelCase ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''', '''umberto-commoncrawl-cased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json''' ), '''umberto-wikipedia-uncased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json''' ), } class A_ ( _a ): lowerCAmelCase__ = 'camembert' def __init__( self: Tuple ,__lowerCAmelCase: Union[str, Any]=30_522 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Union[str, Any]=12 ,__lowerCAmelCase: int=12 ,__lowerCAmelCase: Optional[int]=3_072 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: int=512 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Any="absolute" ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: Dict ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[str] = vocab_size _lowerCamelCase : Any = hidden_size _lowerCamelCase : Union[str, Any] = num_hidden_layers _lowerCamelCase : str = num_attention_heads _lowerCamelCase : List[Any] = hidden_act _lowerCamelCase : int = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : str = type_vocab_size _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Union[str, Any] = layer_norm_eps _lowerCamelCase : Tuple = position_embedding_type _lowerCamelCase : List[Any] = use_cache _lowerCamelCase : Dict = classifier_dropout class A_ ( _a ): @property def _lowercase ( self: Any ): '''simple docstring''' if self.task == "multiple-choice": _lowerCamelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"} else: _lowerCamelCase : int = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
340
1
"""simple docstring""" import numpy as np import datasets _lowerCAmelCase : int = ''' Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] ''' _lowerCAmelCase : str = '''\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } ''' _lowerCAmelCase : int = ''' Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric("mahalanobis") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {\'mahalanobis\': array([0.5])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ ( datasets.Metric ): def _lowercase ( self: List[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" ,id="sequence" ) ,id="X" ), } ) ,) def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : Tuple = np.array(__lowerCAmelCase ) _lowerCamelCase : Tuple = np.array(__lowerCAmelCase ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction _lowerCamelCase : Optional[Any] = X - np.mean(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = np.cov(reference_distribution.T ) try: _lowerCamelCase : str = np.linalg.inv(__lowerCAmelCase ) except np.linalg.LinAlgError: _lowerCamelCase : str = np.linalg.pinv(__lowerCAmelCase ) _lowerCamelCase : Dict = np.dot(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : str = np.dot(__lowerCAmelCase ,X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
340
"""simple docstring""" from collections import defaultdict def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : str = True for v in tree[start]: if v not in visited: ret += dfs(_lowerCamelCase ) if ret % 2 == 0: cuts.append(_lowerCamelCase ) return ret def lowerCamelCase_( ) -> int: '''simple docstring''' dfs(1 ) if __name__ == "__main__": _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = 10, 9 _lowerCAmelCase : str = defaultdict(list) _lowerCAmelCase : dict[int, bool] = {} _lowerCAmelCase : list[int] = [] _lowerCAmelCase : Any = 0 _lowerCAmelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
340
1
"""simple docstring""" from __future__ import annotations def lowerCamelCase_( _lowerCamelCase ) -> bool: '''simple docstring''' _lowerCamelCase : int = str(_lowerCamelCase ) return len(_lowerCamelCase ) == 9 and set(_lowerCamelCase ) == set("123456789" ) def lowerCamelCase_( ) -> int | None: '''simple docstring''' for base_num in range(9999 , 4999 , -1 ): _lowerCamelCase : Union[str, Any] = 100002 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate for base_num in range(333 , 99 , -1 ): _lowerCamelCase : Tuple = 1002003 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate return None if __name__ == "__main__": print(f'''{solution() = }''')
340
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder _lowerCAmelCase : Optional[int] = '''__DUMMY_TRANSFORMERS_USER__''' _lowerCAmelCase : Dict = '''Dummy User''' _lowerCAmelCase : Optional[int] = '''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''' _lowerCAmelCase : Tuple = '''https://hub-ci.huggingface.co''' _lowerCAmelCase : Any = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}''' _lowerCAmelCase : Tuple = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}''' _lowerCAmelCase : Dict = Path('''~/.huggingface/hub_ci_token''').expanduser() @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' monkeypatch.setattr("datasets.config.HF_ENDPOINT" , _lowerCamelCase ) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' HfFolder.save_token(_lowerCamelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="session" ) def lowerCamelCase_( ) -> str: '''simple docstring''' return HfApi(endpoint=_lowerCamelCase ) @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Dict = HfFolder.get_token() HfFolder.save_token(_lowerCamelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' def _cleanup_repo(_lowerCamelCase ): hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) return _cleanup_repo @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> List[str]: '''simple docstring''' @contextmanager def _temporary_repo(_lowerCamelCase ): try: yield repo_id finally: cleanup_repo(_lowerCamelCase ) return _temporary_repo @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Tuple = F"""repo_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[str] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : Dict = 
F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Any = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[Any] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
340
1
"""simple docstring""" from sklearn.metrics import matthews_corrcoef import datasets _lowerCAmelCase : List[Any] = ''' Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] ''' _lowerCAmelCase : Any = ''' Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results[\'matthews_correlation\'], 2)) -0.25 ''' _lowerCAmelCase : Optional[int] = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ ( datasets.Metric ): def _lowercase ( self: Tuple ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) ,reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html" ] ,) def _lowercase ( self: List[str] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[Any]=None ): '''simple docstring''' return { "matthews_correlation": float(matthews_corrcoef(__lowerCAmelCase ,__lowerCAmelCase ,sample_weight=__lowerCAmelCase ) ), }
340
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) class A_ ( _a ): def __init__( self: List[Any] ,__lowerCAmelCase: Union[List[ControlNetModel], Tuple[ControlNetModel]] ): '''simple docstring''' super().__init__() _lowerCamelCase : Tuple = nn.ModuleList(__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Union[torch.Tensor, float, int] ,__lowerCAmelCase: torch.Tensor ,__lowerCAmelCase: List[torch.tensor] ,__lowerCAmelCase: List[float] ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[Dict[str, Any]] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(__lowerCAmelCase ,__lowerCAmelCase ,self.nets ) ): _lowerCamelCase, _lowerCamelCase : Union[str, Any] = controlnet( __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,) # merge samples if i == 0: _lowerCamelCase, _lowerCamelCase : Optional[Any] = down_samples, mid_sample else: _lowerCamelCase : Optional[int] = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__lowerCAmelCase ,__lowerCAmelCase ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Union[str, os.PathLike] ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Callable = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[str] = None ,): '''simple docstring''' _lowerCamelCase : List[Any] = 0 _lowerCamelCase : str = save_directory for controlnet in self.nets: controlnet.save_pretrained( __lowerCAmelCase ,is_main_process=__lowerCAmelCase ,save_function=__lowerCAmelCase ,safe_serialization=__lowerCAmelCase ,variant=__lowerCAmelCase ,) idx += 1 _lowerCamelCase : int = model_path_to_save + F"""_{idx}""" @classmethod def _lowercase ( cls: Any ,__lowerCAmelCase: Optional[Union[str, os.PathLike]] ,**__lowerCAmelCase: int ): '''simple docstring''' _lowerCamelCase : int = 0 _lowerCamelCase : str = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... _lowerCamelCase : Dict = pretrained_model_path while os.path.isdir(__lowerCAmelCase ): _lowerCamelCase : List[Any] = ControlNetModel.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase ) controlnets.append(__lowerCAmelCase ) idx += 1 _lowerCamelCase : Tuple = pretrained_model_path + F"""_{idx}""" logger.info(F"""{len(__lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.""" ) if len(__lowerCAmelCase ) == 0: raise ValueError( F"""No ControlNets found under {os.path.dirname(__lowerCAmelCase )}. Expected at least {pretrained_model_path + '_0'}.""" ) return cls(__lowerCAmelCase )
340
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : Any = {'''vocab_file''': '''vocab.txt'''} _lowerCAmelCase : Any = { '''vocab_file''': { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''', } } _lowerCAmelCase : Optional[int] = { '''YituTech/conv-bert-base''': 512, '''YituTech/conv-bert-medium-small''': 512, '''YituTech/conv-bert-small''': 512, } _lowerCAmelCase : Optional[Any] = { '''YituTech/conv-bert-base''': {'''do_lower_case''': True}, '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True}, '''YituTech/conv-bert-small''': {'''do_lower_case''': True}, } class A_ ( _a ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = ConvBertTokenizer def __init__( self: Optional[Any] ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: Optional[int]="[UNK]" ,__lowerCAmelCase: Optional[Any]="[SEP]" ,__lowerCAmelCase: Any="[PAD]" ,__lowerCAmelCase: str="[CLS]" ,__lowerCAmelCase: Optional[int]="[MASK]" ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: Union[str, Any]=None ,**__lowerCAmelCase: List[Any] ,): '''simple docstring''' super().__init__( __lowerCAmelCase ,tokenizer_file=__lowerCAmelCase ,do_lower_case=__lowerCAmelCase ,unk_token=__lowerCAmelCase ,sep_token=__lowerCAmelCase ,pad_token=__lowerCAmelCase ,cls_token=__lowerCAmelCase ,mask_token=__lowerCAmelCase ,tokenize_chinese_chars=__lowerCAmelCase ,strip_accents=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" ,__lowerCAmelCase ) != do_lower_case or normalizer_state.get("strip_accents" ,__lowerCAmelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" ,__lowerCAmelCase ) != tokenize_chinese_chars ): _lowerCamelCase : Tuple = getattr(__lowerCAmelCase ,normalizer_state.pop("type" ) ) _lowerCamelCase : Any = do_lower_case _lowerCamelCase : List[str] = strip_accents _lowerCamelCase : Dict = tokenize_chinese_chars _lowerCamelCase : Union[str, Any] = normalizer_class(**__lowerCAmelCase ) _lowerCamelCase : List[str] = do_lower_case def _lowercase ( self: Tuple ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any]=None ): '''simple docstring''' _lowerCamelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[int] ,__lowerCAmelCase: Optional[List[int]] = None ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = [self.sep_token_id] _lowerCamelCase : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self: Optional[Any] 
,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[str] = None ): '''simple docstring''' _lowerCamelCase : List[str] = self._tokenizer.model.save(__lowerCAmelCase ,name=__lowerCAmelCase ) return tuple(__lowerCAmelCase )
340
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : int = logging.get_logger(__name__) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]: '''simple docstring''' _lowerCamelCase : Tuple = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("head" ): _lowerCamelCase : Tuple = "segformer.encoder." + key if key.startswith("backbone" ): _lowerCamelCase : Any = key.replace("backbone" , "segformer.encoder" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _lowerCamelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )] _lowerCamelCase : int = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_lowerCamelCase )-1}""" ) if "norm" in key: _lowerCamelCase : Optional[Any] = key.replace("norm" , "layer_norm" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _lowerCamelCase : Dict = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )] _lowerCamelCase : Tuple = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_lowerCamelCase )-1}""" ) if "layer_norm1" in key: _lowerCamelCase : Union[str, Any] = key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: _lowerCamelCase : int = key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 _lowerCamelCase : Union[str, Any] = key[key.find("block" ) + len("block" )] _lowerCamelCase : Optional[Any] = key.replace(F"""block{idx}""" , F"""block.{int(_lowerCamelCase )-1}""" ) if "attn.q" in key: _lowerCamelCase : Optional[int] = key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: _lowerCamelCase : List[str] = key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: _lowerCamelCase : Tuple = key.replace("attn" , "attention.self" ) if "fc1" in key: _lowerCamelCase : Optional[Any] = key.replace("fc1" , "dense1" ) if "fc2" in key: _lowerCamelCase : Dict = key.replace("fc2" , "dense2" ) if "linear_pred" in key: _lowerCamelCase : int = key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: _lowerCamelCase : str = key.replace("linear_fuse.conv" , "linear_fuse" ) _lowerCamelCase : Optional[Any] = key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _lowerCamelCase : Union[str, Any] = key[key.find("linear_c" ) + len("linear_c" )] _lowerCamelCase : Optional[int] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_lowerCamelCase )-1}""" ) if key.startswith("head" ): _lowerCamelCase : List[str] = key.replace("head" , "classifier" ) _lowerCamelCase : Union[str, Any] = value return new_state_dict def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" ) _lowerCamelCase : Optional[Any] = 
state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict _lowerCamelCase : int = kv_weight[ : config.hidden_sizes[i], : ] _lowerCamelCase : int = kv_bias[: config.hidden_sizes[i]] _lowerCamelCase : Optional[int] = kv_weight[ config.hidden_sizes[i] :, : ] _lowerCamelCase : Optional[Any] = kv_bias[ config.hidden_sizes[i] : ] def lowerCamelCase_( ) -> Dict: '''simple docstring''' _lowerCamelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCamelCase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return image @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : Any = SegformerConfig() _lowerCamelCase : int = False # set attributes based on model_name _lowerCamelCase : Any = "huggingface/label-files" if "segformer" in model_name: _lowerCamelCase : str = model_name[len("segformer." ) : len("segformer." ) + 2] if "ade" in model_name: _lowerCamelCase : str = 150 _lowerCamelCase : Dict = "ade20k-id2label.json" _lowerCamelCase : Dict = (1, 150, 128, 128) elif "city" in model_name: _lowerCamelCase : List[str] = 19 _lowerCamelCase : Tuple = "cityscapes-id2label.json" _lowerCamelCase : Tuple = (1, 19, 128, 128) else: raise ValueError(F"""Model {model_name} not supported""" ) elif "mit" in model_name: _lowerCamelCase : List[str] = True _lowerCamelCase : Tuple = model_name[4:6] _lowerCamelCase : Tuple = 1000 _lowerCamelCase : List[Any] = "imagenet-1k-id2label.json" _lowerCamelCase : List[Any] = (1, 1000) else: raise ValueError(F"""Model {model_name} not supported""" ) # set config attributes _lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowerCamelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase : Optional[Any] = idalabel _lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : int = 256 elif size == "b2": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : List[Any] = 768 _lowerCamelCase : Any = [3, 4, 6, 3] elif size == "b3": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : Union[str, Any] = 768 _lowerCamelCase : Optional[Any] = [3, 4, 18, 3] elif size == "b4": _lowerCamelCase : str = [64, 128, 320, 512] _lowerCamelCase : Optional[Any] = 768 _lowerCamelCase : Dict = [3, 8, 27, 3] elif size == "b5": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : Tuple = 768 _lowerCamelCase : Tuple = [3, 6, 40, 3] else: raise ValueError(F"""Size {size} not supported""" ) # load image processor (only resize + normalize) _lowerCamelCase : Dict = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase ) # prepare image _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values logger.info(F"""Converting model {model_name}...""" ) # load original state dict if encoder_only: _lowerCamelCase : Tuple = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) ) else: _lowerCamelCase : int = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )["state_dict"] # rename keys _lowerCamelCase : str = rename_keys(_lowerCamelCase , 
encoder_only=_lowerCamelCase ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(_lowerCamelCase , _lowerCamelCase ) # create HuggingFace model and load state dict if encoder_only: _lowerCamelCase : Tuple = False _lowerCamelCase : Optional[int] = SegformerForImageClassification(_lowerCamelCase ) else: _lowerCamelCase : List[str] = SegformerForSemanticSegmentation(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) model.eval() # forward pass _lowerCamelCase : Any = model(_lowerCamelCase ) _lowerCamelCase : Dict = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": _lowerCamelCase : str = torch.tensor( [ [[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]], [[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]], [[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]], [[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]], [[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": _lowerCamelCase : int = torch.tensor( [ [[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]], [[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]], [[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": _lowerCamelCase : Optional[Any] = torch.tensor( [ [[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]], [[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]], [[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]], [[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]], [[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]], [[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]], 
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": _lowerCamelCase : Dict = torch.tensor( [ [[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]], [[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]], [[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": _lowerCamelCase : Optional[int] = torch.tensor( [ [[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]], [[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]], [[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [ [-1.13_72e01, -1.27_87e01, -1.34_77e01], [-1.25_36e01, -1.41_94e01, -1.44_09e01], [-1.32_17e01, -1.48_88e01, -1.53_27e01], ], [ [-1.47_91e01, -1.71_22e01, -1.82_77e01], [-1.71_63e01, -1.91_92e01, -1.95_33e01], [-1.78_97e01, -1.99_91e01, -2.03_15e01], ], [ [7.67_23e-01, 4.19_21e-01, -7.78_78e-02], [4.77_72e-01, 9.55_57e-03, -2.80_82e-01], [3.60_32e-01, -2.48_26e-01, -5.11_68e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": _lowerCamelCase : Union[str, Any] = torch.tensor( [ [[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]], [[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]], [[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": _lowerCamelCase : List[Any] = torch.tensor( [ [[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]], [[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]], [[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]], [[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]], [[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": _lowerCamelCase : Any = torch.tensor( [ [[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]], [[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]], [[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]], ] ) 
elif model_name == "segformer.b4.1024x1024.city.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]], [[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]], [[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": _lowerCamelCase : str = torch.tensor( [ [[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]], [[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]], [[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]], ] ) else: _lowerCamelCase : Dict = logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-2 ) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''segformer.b0.512x512.ade.160k''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowerCAmelCase : str = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
340
1
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters _lowerCAmelCase : Optional[Any] = (720, 1280) # Height, Width _lowerCAmelCase : int = (0.4, 0.6) # if height or width lower than this scale, drop it. _lowerCAmelCase : Any = 1 / 100 _lowerCAmelCase : Optional[int] = '''''' _lowerCAmelCase : List[str] = '''''' _lowerCAmelCase : Optional[Any] = '''''' _lowerCAmelCase : Optional[Any] = 250 def lowerCamelCase_( ) -> None: '''simple docstring''' _lowerCamelCase, _lowerCamelCase : int = get_dataset(_lowerCamelCase , _lowerCamelCase ) for index in range(_lowerCamelCase ): _lowerCamelCase : Tuple = random.sample(range(len(_lowerCamelCase ) ) , 4 ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = update_image_and_anno( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , filter_scale=_lowerCamelCase , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' _lowerCamelCase : Optional[int] = random_chars(32 ) _lowerCamelCase : str = path.split(os.sep )[-1].rsplit("." , 1 )[0] _lowerCamelCase : int = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}""" cva.imwrite(F"""{file_root}.jpg""" , _lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" ) _lowerCamelCase : Optional[int] = [] for anno in new_annos: _lowerCamelCase : Dict = anno[3] - anno[1] _lowerCamelCase : List[str] = anno[4] - anno[2] _lowerCamelCase : List[Any] = anno[1] + width / 2 _lowerCamelCase : Any = anno[2] + height / 2 _lowerCamelCase : Dict = F"""{anno[0]} {x_center} {y_center} {width} {height}""" annos_list.append(_lowerCamelCase ) with open(F"""{file_root}.txt""" , "w" ) as outfile: outfile.write("\n".join(line for line in annos_list ) ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> tuple[list, list]: '''simple docstring''' _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : Any = [] for label_file in glob.glob(os.path.join(_lowerCamelCase , "*.txt" ) ): _lowerCamelCase : Optional[Any] = label_file.split(os.sep )[-1].rsplit("." 
, 1 )[0] with open(_lowerCamelCase ) as in_file: _lowerCamelCase : List[str] = in_file.readlines() _lowerCamelCase : List[Any] = os.path.join(_lowerCamelCase , F"""{label_name}.jpg""" ) _lowerCamelCase : Optional[Any] = [] for obj_list in obj_lists: _lowerCamelCase : str = obj_list.rstrip("\n" ).split(" " ) _lowerCamelCase : Optional[Any] = float(obj[1] ) - float(obj[3] ) / 2 _lowerCamelCase : Any = float(obj[2] ) - float(obj[4] ) / 2 _lowerCamelCase : str = float(obj[1] ) + float(obj[3] ) / 2 _lowerCamelCase : List[str] = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(_lowerCamelCase ) labels.append(_lowerCamelCase ) return img_paths, labels def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0.0 , ) -> tuple[list, list, str]: '''simple docstring''' _lowerCamelCase : str = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) _lowerCamelCase : List[str] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) _lowerCamelCase : List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) _lowerCamelCase : Optional[int] = int(scale_x * output_size[1] ) _lowerCamelCase : Tuple = int(scale_y * output_size[0] ) _lowerCamelCase : List[Any] = [] _lowerCamelCase : Any = [] for i, index in enumerate(_lowerCamelCase ): _lowerCamelCase : Optional[int] = all_img_list[index] path_list.append(_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = all_annos[index] _lowerCamelCase : Tuple = cva.imread(_lowerCamelCase ) if i == 0: # top-left _lowerCamelCase : Any = cva.resize(_lowerCamelCase , (divid_point_x, divid_point_y) ) _lowerCamelCase : Any = img for bbox in img_annos: _lowerCamelCase : List[Any] = bbox[1] * scale_x _lowerCamelCase : str = bbox[2] * scale_y _lowerCamelCase : Union[str, Any] = bbox[3] * scale_x _lowerCamelCase : List[Any] = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right _lowerCamelCase : List[Any] = cva.resize(_lowerCamelCase , (output_size[1] - divid_point_x, divid_point_y) ) _lowerCamelCase : Optional[Any] = img for bbox in img_annos: _lowerCamelCase : Union[str, Any] = scale_x + bbox[1] * (1 - scale_x) _lowerCamelCase : List[Any] = bbox[2] * scale_y _lowerCamelCase : List[Any] = scale_x + bbox[3] * (1 - scale_x) _lowerCamelCase : Tuple = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left _lowerCamelCase : Optional[Any] = cva.resize(_lowerCamelCase , (divid_point_x, output_size[0] - divid_point_y) ) _lowerCamelCase : Optional[int] = img for bbox in img_annos: _lowerCamelCase : Any = bbox[1] * scale_x _lowerCamelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y) _lowerCamelCase : Union[str, Any] = bbox[3] * scale_x _lowerCamelCase : Any = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right _lowerCamelCase : str = cva.resize( _lowerCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) _lowerCamelCase : Union[str, Any] = img for bbox in img_annos: _lowerCamelCase : Tuple = scale_x + bbox[1] * (1 - scale_x) _lowerCamelCase : List[Any] = scale_y + bbox[2] * (1 - scale_y) _lowerCamelCase : Any = scale_x + bbox[3] * (1 - scale_x) _lowerCamelCase : int = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: 
_lowerCamelCase : Any = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" _lowerCamelCase : Tuple = ascii_lowercase + digits return "".join(random.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) ) if __name__ == "__main__": main() print('''DONE ✅''')
340
"""simple docstring""" _lowerCAmelCase : dict[tuple[int, int, int], int] = {} def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on _lowerCamelCase : Optional[int] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one _lowerCamelCase : int = _calculate(days - 1 , _lowerCamelCase , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 _lowerCamelCase : Tuple = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter _lowerCamelCase : str = _calculate(days - 1 , _lowerCamelCase , 0 ) _lowerCamelCase : List[Any] = state_late + state_absent + state_ontime _lowerCamelCase : int = prizestrings return prizestrings def lowerCamelCase_( _lowerCamelCase = 30 ) -> int: '''simple docstring''' return _calculate(_lowerCamelCase , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
340
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : int = { '''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class A_ ( _a ): lowerCAmelCase__ = 'poolformer' def __init__( self: Any ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: Optional[int]=16 ,__lowerCAmelCase: Union[str, Any]=16 ,__lowerCAmelCase: Optional[Any]=3 ,__lowerCAmelCase: Tuple=4.0 ,__lowerCAmelCase: Optional[Any]=[2, 2, 6, 2] ,__lowerCAmelCase: Optional[Any]=[64, 128, 320, 512] ,__lowerCAmelCase: Dict=[7, 3, 3, 3] ,__lowerCAmelCase: Dict=[4, 2, 2, 2] ,__lowerCAmelCase: Optional[int]=[2, 1, 1, 1] ,__lowerCAmelCase: Optional[int]=4 ,__lowerCAmelCase: Union[str, Any]=0.0 ,__lowerCAmelCase: Optional[Any]="gelu" ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: Optional[int]=1e-5 ,__lowerCAmelCase: Dict=0.02 ,**__lowerCAmelCase: Optional[int] ,): '''simple docstring''' _lowerCamelCase : int = num_channels _lowerCamelCase : Optional[int] = patch_size _lowerCamelCase : Union[str, Any] = stride _lowerCamelCase : Optional[int] = padding _lowerCamelCase : int = pool_size _lowerCamelCase : Optional[Any] = hidden_sizes _lowerCamelCase : Optional[Any] = mlp_ratio _lowerCamelCase : str = depths _lowerCamelCase : List[str] = patch_sizes _lowerCamelCase : str = strides _lowerCamelCase : str = num_encoder_blocks _lowerCamelCase : Tuple = drop_path_rate _lowerCamelCase : List[str] = hidden_act _lowerCamelCase : str = use_layer_scale _lowerCamelCase : List[Any] = layer_scale_init_value _lowerCamelCase : Optional[int] = initializer_range super().__init__(**__lowerCAmelCase ) class A_ ( _a ): lowerCAmelCase__ = version.parse('1.11' ) @property def _lowercase ( self: List[Any] ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _lowercase ( self: Optional[int] ): '''simple docstring''' return 2e-3
340
"""simple docstring""" from __future__ import annotations def lowerCamelCase_( _lowerCamelCase ) -> bool: '''simple docstring''' _lowerCamelCase : int = str(_lowerCamelCase ) return len(_lowerCamelCase ) == 9 and set(_lowerCamelCase ) == set("123456789" ) def lowerCamelCase_( ) -> int | None: '''simple docstring''' for base_num in range(9999 , 4999 , -1 ): _lowerCamelCase : Union[str, Any] = 100002 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate for base_num in range(333 , 99 , -1 ): _lowerCamelCase : Tuple = 1002003 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate return None if __name__ == "__main__": print(f'''{solution() = }''')
340
1
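A quick check for the pandigital solver above; 918273645 is the concatenated product of 9 with (1, 2, 3, 4, 5) given in the problem statement, so any returned answer must beat it:

assert is_9_pandigital(918273645)
result = solution()
assert result is not None and result > 918273645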
"""simple docstring""" import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin _lowerCAmelCase : str = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right _lowerCAmelCase : int = 5_0003 _lowerCAmelCase : List[Any] = 5_0002 @require_sentencepiece @require_tokenizers class A_ ( _a , unittest.TestCase ): lowerCAmelCase__ = PLBartTokenizer lowerCAmelCase__ = None lowerCAmelCase__ = False def _lowercase ( self: Dict ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _lowerCamelCase : Dict = PLBartTokenizer(__lowerCAmelCase ,language_codes="base" ,keep_accents=__lowerCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = PLBartTokenizer(__lowerCAmelCase ,language_codes="base" ,keep_accents=__lowerCAmelCase ) _lowerCamelCase : Any = tokenizer.tokenize("This is a test" ) self.assertListEqual(__lowerCAmelCase ,["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,) _lowerCamelCase : Union[str, Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( __lowerCAmelCase ,[ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] ,) _lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) self.assertListEqual( __lowerCAmelCase ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] ,) _lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__lowerCAmelCase ) self.assertListEqual( __lowerCAmelCase ,[ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] ,) _lowerCamelCase : Optional[int] = tokenizer.vocab_size _lowerCamelCase : Optional[Any] = [tokenizer.convert_ids_to_tokens(__lowerCAmelCase ) for x in range(end - 4 ,__lowerCAmelCase )] self.assertListEqual(__lowerCAmelCase ,["__java__", "__python__", "__en_XX__", "<mask>"] ) _lowerCamelCase : Tuple = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" _lowerCamelCase : Any = tokenizer(__lowerCAmelCase ).input_ids self.assertEqual( tokenizer.decode(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase ,clean_up_tokenization_spaces=__lowerCAmelCase ) ,__lowerCAmelCase ,) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Optional[int] = PLBartTokenizer(__lowerCAmelCase ,language_codes="multi" ,keep_accents=__lowerCAmelCase ) _lowerCamelCase : Any = tokenizer.tokenize("This is a test" ) self.assertListEqual(__lowerCAmelCase ,["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( 
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,) _lowerCamelCase : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( __lowerCAmelCase ,[ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] ,) _lowerCamelCase : str = tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) self.assertListEqual( __lowerCAmelCase ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] ,) _lowerCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(__lowerCAmelCase ) self.assertListEqual( __lowerCAmelCase ,[ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] ,) _lowerCamelCase : Optional[Any] = tokenizer.vocab_size _lowerCamelCase : Optional[int] = [tokenizer.convert_ids_to_tokens(__lowerCAmelCase ) for x in range(end - 7 ,__lowerCAmelCase )] self.assertListEqual( __lowerCAmelCase ,["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] ) _lowerCamelCase : Tuple = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" _lowerCamelCase : Optional[Any] = tokenizer(__lowerCAmelCase ).input_ids self.assertEqual( tokenizer.decode(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase ,clean_up_tokenization_spaces=__lowerCAmelCase ) ,__lowerCAmelCase ,) @require_torch @require_sentencepiece @require_tokenizers class A_ ( unittest.TestCase ): lowerCAmelCase__ = 'uclanlp/plbart-python-en_XX' lowerCAmelCase__ = [ 'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])', 'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])', ] lowerCAmelCase__ = [ 'Returns the maximum value of a b c.', 'Sums the values of a b c.', ] lowerCAmelCase__ = [ 1_3_4, 5_4_5_2, 3_3_4_6_0, 3_3_4_4_1, 3_3_4_6_3, 3_3_4_6_5, 3_3_4_6_3, 3_3_4_4_9, 9_8_8, 2_0, 3_3_4_5_6, 1_9, 3_3_4_5_6, 7_7_1, 3_9, 4_2_5_8, 8_8_9, 3_3_1_8, 3_3_4_4_1, 3_3_4_6_3, 3_3_4_6_5, 3_3_4_6_3, 3_3_4_4_9, 2_4_7_1, 2, PYTHON_CODE, ] @classmethod def _lowercase ( cls: Optional[int] ): '''simple docstring''' _lowerCamelCase : PLBartTokenizer = PLBartTokenizer.from_pretrained( cls.checkpoint_name ,language_codes="base" ,src_lang="python" ,tgt_lang="en_XX" ) _lowerCamelCase : Optional[Any] = 1 return cls def _lowercase ( self: Optional[Any] ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] ,50_001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] ,50_002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] ,50_003 ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens ,__lowerCAmelCase ) def _lowercase ( self: int ): '''simple docstring''' self.assertIn(__lowerCAmelCase ,self.tokenizer.all_special_ids ) _lowerCamelCase : Dict = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2] _lowerCamelCase : Dict = 
self.tokenizer.decode(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase ) self.assertNotIn(self.tokenizer.eos_token ,__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Optional[int] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20] self.assertIsInstance(src_text[0] ,__lowerCAmelCase ) _lowerCamelCase : int = 10 _lowerCamelCase : Optional[int] = self.tokenizer(__lowerCAmelCase ,max_length=__lowerCAmelCase ,truncation=__lowerCAmelCase ).input_ids[0] self.assertEqual(ids[-2] ,2 ) self.assertEqual(ids[-1] ,__lowerCAmelCase ) self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase ) def _lowercase ( self: Dict ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) ,[50_004, 50_001] ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : str = tempfile.mkdtemp() _lowerCamelCase : Any = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : int = PLBartTokenizer.from_pretrained(__lowerCAmelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,__lowerCAmelCase ) @require_torch def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Optional[int] = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=__lowerCAmelCase ,return_tensors="pt" ) _lowerCamelCase : Union[str, Any] = shift_tokens_right(batch["labels"] ,self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() ,[2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] ,__lowerCAmelCase ) self.assertEqual(batch.decoder_input_ids[1][-1] ,2 ) self.assertEqual(batch.labels[1][-2:].tolist() ,[2, EN_CODE] ) @require_torch def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : int = self.tokenizer( self.src_text ,text_target=self.tgt_text ,padding=__lowerCAmelCase ,truncation=__lowerCAmelCase ,max_length=len(self.expected_src_tokens ) ,return_tensors="pt" ,) _lowerCamelCase : Any = shift_tokens_right(batch["labels"] ,self.tokenizer.pad_token_id ) self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase ) self.assertEqual((2, 26) ,batch.input_ids.shape ) self.assertEqual((2, 26) ,batch.attention_mask.shape ) _lowerCamelCase : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens ,__lowerCAmelCase ) self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens ,[] ) self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, PYTHON_CODE] ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : List[str] = self.tokenizer(self.src_text ,padding=__lowerCAmelCase ,truncation=__lowerCAmelCase ,max_length=3 ,return_tensors="pt" ) _lowerCamelCase : List[str] = self.tokenizer( text_target=self.tgt_text ,padding=__lowerCAmelCase ,truncation=__lowerCAmelCase ,max_length=10 ,return_tensors="pt" ) _lowerCamelCase : Optional[int] = targets["input_ids"] _lowerCamelCase : List[str] = shift_tokens_right(__lowerCAmelCase ,self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.decoder_input_ids.shape[1] ,10 ) @require_torch def _lowercase ( self: 
List[Any] ): '''simple docstring''' _lowerCamelCase : Dict = self.tokenizer._build_translation_inputs( "A test" ,return_tensors="pt" ,src_lang="en_XX" ,tgt_lang="java" ) self.assertEqual( nested_simplify(__lowerCAmelCase ) ,{ # A, test, EOS, en_XX "input_ids": [[150, 242, 2, 50_003]], "attention_mask": [[1, 1, 1, 1]], # java "forced_bos_token_id": 50_001, } ,)
340
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class A_ ( _a ): lowerCAmelCase__ = 'char' lowerCAmelCase__ = 'bpe' lowerCAmelCase__ = 'wp' _lowerCAmelCase : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class A_ ( _a ): lowerCAmelCase__ = ['image_processor', 'char_tokenizer'] lowerCAmelCase__ = 'ViTImageProcessor' lowerCAmelCase__ = 'MgpstrTokenizer' def __init__( self: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Optional[int]=None ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Any = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." ,__lowerCAmelCase ,) _lowerCamelCase : List[Any] = kwargs.pop("feature_extractor" ) _lowerCamelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) _lowerCamelCase : List[str] = tokenizer _lowerCamelCase : str = AutoTokenizer.from_pretrained("gpt2" ) _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(__lowerCAmelCase ,__lowerCAmelCase ) def __call__( self: Optional[int] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[Any]=None ,**__lowerCAmelCase: Tuple ): '''simple docstring''' if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." 
) if images is not None: _lowerCamelCase : Optional[int] = self.image_processor(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is not None: _lowerCamelCase : int = self.char_tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is None: return inputs elif images is None: return encodings else: _lowerCamelCase : Tuple = encodings["input_ids"] return inputs def _lowercase ( self: int ,__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = sequences _lowerCamelCase : Dict = char_preds.size(0 ) _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._decode_helper(__lowerCAmelCase ,"char" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = self._decode_helper(__lowerCAmelCase ,"bpe" ) _lowerCamelCase, _lowerCamelCase : Tuple = self._decode_helper(__lowerCAmelCase ,"wp" ) _lowerCamelCase : List[str] = [] _lowerCamelCase : str = [] for i in range(__lowerCAmelCase ): _lowerCamelCase : str = [char_scores[i], bpe_scores[i], wp_scores[i]] _lowerCamelCase : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] _lowerCamelCase : Optional[Any] = scores.index(max(__lowerCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _lowerCamelCase : Tuple = {} _lowerCamelCase : Tuple = final_strs _lowerCamelCase : int = final_scores _lowerCamelCase : str = char_strs _lowerCamelCase : Dict = bpe_strs _lowerCamelCase : int = wp_strs return out def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ): '''simple docstring''' if format == DecodeType.CHARACTER: _lowerCamelCase : int = self.char_decode _lowerCamelCase : List[str] = 1 _lowerCamelCase : Optional[int] = "[s]" elif format == DecodeType.BPE: _lowerCamelCase : Dict = self.bpe_decode _lowerCamelCase : str = 2 _lowerCamelCase : Union[str, Any] = "#" elif format == DecodeType.WORDPIECE: _lowerCamelCase : int = self.wp_decode _lowerCamelCase : List[str] = 102 _lowerCamelCase : List[Any] = "[SEP]" else: raise ValueError(F"""Format {format} is not supported.""" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = [], [] _lowerCamelCase : Any = pred_logits.size(0 ) _lowerCamelCase : int = pred_logits.size(1 ) _lowerCamelCase, _lowerCamelCase : List[Any] = pred_logits.topk(1 ,dim=-1 ,largest=__lowerCAmelCase ,sorted=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_index.view(-1 ,__lowerCAmelCase )[:, 1:] _lowerCamelCase : List[str] = decoder(__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : str = torch.nn.functional.softmax(__lowerCAmelCase ,dim=2 ).max(dim=2 ) _lowerCamelCase : Any = preds_max_prob[:, 1:] for index in range(__lowerCAmelCase ): _lowerCamelCase : List[Any] = preds_str[index].find(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_str[index][:pred_eos] _lowerCamelCase : Optional[Any] = preds_index[index].cpu().tolist() _lowerCamelCase : List[str] = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1 _lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1] _lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__lowerCAmelCase ) conf_scores.append(__lowerCAmelCase ) return dec_strs, conf_scores def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : str = [seq.replace(" " ,"" ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs def _lowercase ( 
self: List[str] ,__lowerCAmelCase: List[str] ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(__lowerCAmelCase ) def _lowercase ( self: Tuple ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = [seq.replace(" " ,"" ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs
340
1
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel _lowerCAmelCase : Optional[int] = logging.getLogger(__name__) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' if os.path.exists(_lowerCamelCase ): if os.path.exists(os.path.join(_lowerCamelCase , "config.json" ) ) and os.path.isfile( os.path.join(_lowerCamelCase , "config.json" ) ): os.remove(os.path.join(_lowerCamelCase , "config.json" ) ) if os.path.exists(os.path.join(_lowerCamelCase , "pytorch_model.bin" ) ) and os.path.isfile( os.path.join(_lowerCamelCase , "pytorch_model.bin" ) ): os.remove(os.path.join(_lowerCamelCase , "pytorch_model.bin" ) ) else: os.makedirs(_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]: '''simple docstring''' _lowerCamelCase : Tuple = 2 if unlogit: _lowerCamelCase : List[Any] = torch.pow(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Dict = p * torch.log(_lowerCamelCase ) _lowerCamelCase : Optional[Any] = 0 return -plogp.sum(dim=-1 ) def lowerCamelCase_( _lowerCamelCase ) -> List[Any]: '''simple docstring''' logger.info("lv, h >\t" + "\t".join(F"""{x + 1}""" for x in range(len(_lowerCamelCase ) ) ) ) for row in range(len(_lowerCamelCase ) ): if tensor.dtype != torch.long: logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) ) else: logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:d}""" for x in tensor[row].cpu().data ) ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=False ) -> List[str]: '''simple docstring''' _lowerCamelCase, _lowerCamelCase : Any = model.config.num_hidden_layers, model.config.num_attention_heads _lowerCamelCase : Optional[int] = torch.zeros(_lowerCamelCase , _lowerCamelCase ).to(args.device ) _lowerCamelCase : Dict = torch.zeros(_lowerCamelCase , _lowerCamelCase ).to(args.device ) if head_mask is None: _lowerCamelCase : Optional[int] = torch.ones(_lowerCamelCase , _lowerCamelCase ).to(args.device ) head_mask.requires_grad_(requires_grad=_lowerCamelCase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _lowerCamelCase : str = None _lowerCamelCase : Optional[Any] = 0.0 _lowerCamelCase : Optional[Any] = 0.0 for step, inputs in enumerate(tqdm(_lowerCamelCase , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ): _lowerCamelCase : Any = tuple(t.to(args.device ) for t in inputs ) ((_lowerCamelCase), ) : List[str] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _lowerCamelCase : List[str] = model(_lowerCamelCase , labels=_lowerCamelCase , head_mask=_lowerCamelCase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(_lowerCamelCase ): _lowerCamelCase : str = entropy(attn.detach() , _lowerCamelCase ) 
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(_lowerCamelCase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _lowerCamelCase : List[Any] = 2 _lowerCamelCase : int = torch.pow(torch.pow(_lowerCamelCase , _lowerCamelCase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: _lowerCamelCase : List[Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info("Attention entropies" ) print_ad_tensor(_lowerCamelCase ) if compute_importance: logger.info("Head importance scores" ) print_ad_tensor(_lowerCamelCase ) logger.info("Head ranked by importance scores" ) _lowerCamelCase : int = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _lowerCamelCase : List[str] = torch.arange( head_importance.numel() , device=args.device ) _lowerCamelCase : Optional[Any] = head_ranks.view_as(_lowerCamelCase ) print_ad_tensor(_lowerCamelCase ) return attn_entropy, head_importance, total_loss def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = compute_heads_importance(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , compute_entropy=_lowerCamelCase ) _lowerCamelCase : Dict = 1 / loss # instead of downsteam score use the LM loss logger.info("Pruning: original score: %f, threshold: %f" , _lowerCamelCase , original_score * args.masking_threshold ) _lowerCamelCase : Any = torch.ones_like(_lowerCamelCase ) _lowerCamelCase : List[Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _lowerCamelCase : int = original_score while current_score >= original_score * args.masking_threshold: _lowerCamelCase : Optional[Any] = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _lowerCamelCase : Optional[int] = float("Inf" ) _lowerCamelCase : Union[str, Any] = head_importance.view(-1 ).sort()[1] if len(_lowerCamelCase ) <= num_to_mask: print("BREAK BY num_to_mask" ) break # mask heads _lowerCamelCase : List[str] = current_heads_to_mask[:num_to_mask] logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) ) _lowerCamelCase : Optional[int] = new_head_mask.view(-1 ) _lowerCamelCase : str = 0.0 _lowerCamelCase : Optional[int] = new_head_mask.view_as(_lowerCamelCase ) _lowerCamelCase : Optional[Any] = new_head_mask.clone().detach() print_ad_tensor(_lowerCamelCase ) # Compute metric and head importance again _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = compute_heads_importance( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , compute_entropy=_lowerCamelCase , head_mask=_lowerCamelCase ) _lowerCamelCase : Optional[Any] = 1 / loss logger.info( "Masking: current score: %f, remaining heads %d (%.1f percents)" , _lowerCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info("Final head mask" ) print_ad_tensor(_lowerCamelCase ) np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() ) return head_mask def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , 
_lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : Dict = datetime.now() _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = compute_heads_importance( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , compute_entropy=_lowerCamelCase , compute_importance=_lowerCamelCase , head_mask=_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = 1 / loss _lowerCamelCase : Tuple = datetime.now() - before_time _lowerCamelCase : Optional[int] = sum(p.numel() for p in model.parameters() ) _lowerCamelCase : List[Any] = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_lowerCamelCase ) ) } for k, v in heads_to_prune.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Dict = [ v, ] assert sum(len(_lowerCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(_lowerCamelCase ) _lowerCamelCase : List[str] = sum(p.numel() for p in model.parameters() ) _lowerCamelCase : List[str] = datetime.now() _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = compute_heads_importance( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , compute_entropy=_lowerCamelCase , compute_importance=_lowerCamelCase , head_mask=_lowerCamelCase , actually_pruned=_lowerCamelCase , ) _lowerCamelCase : List[str] = 1 / loss _lowerCamelCase : Any = datetime.now() - before_time logger.info( "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , _lowerCamelCase , _lowerCamelCase , pruned_num_params / original_num_params * 100 , ) logger.info("Pruning: score with masking: %f score with pruning: %f" , _lowerCamelCase , _lowerCamelCase ) logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 ) save_model(_lowerCamelCase , args.output_dir ) def lowerCamelCase_( ) -> int: '''simple docstring''' _lowerCamelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , ) parser.add_argument( "--model_name_or_path" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--output_dir" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="The output directory where the model predictions and checkpoints will be written." , ) # Other parameters parser.add_argument( "--config_name" , default="" , type=_lowerCamelCase , help="Pretrained config name or path if not the same as model_name_or_path" , ) parser.add_argument( "--tokenizer_name" , default="" , type=_lowerCamelCase , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , ) parser.add_argument( "--cache_dir" , default=_lowerCamelCase , type=_lowerCamelCase , help="Where do you want to store the pre-trained models downloaded from s3" , ) parser.add_argument( "--data_subset" , type=_lowerCamelCase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." 
) parser.add_argument( "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" ) parser.add_argument( "--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , ) parser.add_argument( "--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." ) parser.add_argument( "--masking_threshold" , default=0.9 , type=_lowerCamelCase , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , ) parser.add_argument( "--masking_amount" , default=0.1 , type=_lowerCamelCase , help="Amount to heads to masking at each masking step." ) parser.add_argument("--metric_name" , default="acc" , type=_lowerCamelCase , help="Metric to use for head masking." ) parser.add_argument( "--max_seq_length" , default=128 , type=_lowerCamelCase , help=( "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, sequences shorter padded." ) , ) parser.add_argument("--batch_size" , default=1 , type=_lowerCamelCase , help="Batch size." ) parser.add_argument("--seed" , type=_lowerCamelCase , default=42 ) parser.add_argument("--local_rank" , type=_lowerCamelCase , default=-1 , help="local_rank for distributed training on gpus" ) parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" ) parser.add_argument("--server_ip" , type=_lowerCamelCase , default="" , help="Can be used for distant debugging." ) parser.add_argument("--server_port" , type=_lowerCamelCase , default="" , help="Can be used for distant debugging." 
) _lowerCamelCase : Dict = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_lowerCamelCase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _lowerCamelCase : int = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" ) _lowerCamelCase : Optional[int] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _lowerCamelCase : Optional[int] = torch.device("cuda" , args.local_rank ) _lowerCamelCase : List[str] = 1 torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) _lowerCamelCase : List[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: _lowerCamelCase : Dict = nn.parallel.DistributedDataParallel( _lowerCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_lowerCamelCase ) elif args.n_gpu > 1: _lowerCamelCase : str = nn.DataParallel(_lowerCamelCase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=_lowerCamelCase ) torch.save(_lowerCamelCase , os.path.join(args.output_dir , "run_args.bin" ) ) logger.info("Training/evaluation parameters %s" , _lowerCamelCase ) # Prepare dataset _lowerCamelCase : Optional[int] = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) _lowerCamelCase : Dict = (torch.from_numpy(_lowerCamelCase ),) _lowerCamelCase : int = TensorDataset(*_lowerCamelCase ) _lowerCamelCase : Optional[int] = RandomSampler(_lowerCamelCase ) _lowerCamelCase : Tuple = DataLoader(_lowerCamelCase , sampler=_lowerCamelCase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _lowerCamelCase : Union[str, Any] = mask_heads(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) prune_heads(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if __name__ == "__main__": main()
340
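The head-pruning script above relies on a plain Shannon-entropy helper over attention distributions; a self-contained sketch of the same computation (the variable names here are illustrative, not from the script):

import math

import torch

# a uniform distribution over 4 outcomes has entropy ln(4)
p = torch.full((4,), 0.25)
plogp = p * torch.log(p)
attention_entropy = -plogp.sum(dim=-1)
assert torch.isclose(attention_entropy, torch.tensor(math.log(4)))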
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version _lowerCAmelCase : List[Any] = get_logger(__name__) class A_ : lowerCAmelCase__ = 'dummy_data' lowerCAmelCase__ = 'datasets' lowerCAmelCase__ = False def __init__( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[Version, str] ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[List[Callable]] = None ,): '''simple docstring''' _lowerCamelCase : str = 0 _lowerCamelCase : List[str] = dataset_name _lowerCamelCase : Optional[int] = cache_dir _lowerCamelCase : Optional[int] = use_local_dummy_data _lowerCamelCase : int = config # download_callbacks take a single url as input _lowerCamelCase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _lowerCamelCase : int = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _lowerCamelCase : Tuple = str(__lowerCAmelCase ) # to be downloaded _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Dict = None @property def _lowercase ( self: str ): '''simple docstring''' if self._dummy_file is None: _lowerCamelCase : List[str] = self.download_dummy_data() return self._dummy_file @property def _lowercase ( self: str ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join("dummy" ,self.version_name ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' return os.path.join(self.dummy_data_folder ,"dummy_data.zip" ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Dict = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _lowerCamelCase : Optional[int] = cached_path( __lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=__lowerCAmelCase ,force_extract=__lowerCAmelCase ) return os.path.join(__lowerCAmelCase ,self.dummy_file_name ) @property def _lowercase ( self: Tuple ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def _lowercase ( self: List[str] ): '''simple docstring''' if self._bucket_url is None: _lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) ) return self._bucket_url @property def _lowercase ( self: Union[str, Any] ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _lowerCamelCase : Tuple = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _lowerCamelCase : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase ) elif isinstance(__lowerCAmelCase ,(list, tuple) ): return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase ) else: return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' return path def _lowercase ( self: Optional[int] ): '''simple docstring''' return {} def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : str = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): for single_url in single_urls: download_callback(__lowerCAmelCase ) else: _lowerCamelCase : Union[str, Any] = single_urls download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) for x in single_urls] else: _lowerCamelCase : Union[str, Any] = single_urls _lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) _lowerCamelCase : List[Any] = value # make sure that values are unique if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Dict = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _lowerCamelCase : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,__lowerCAmelCase ) ) for url in data_url ) _lowerCamelCase : Optional[Any] = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _lowerCamelCase : Tuple = [data_url[0]] * len(__lowerCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the 
url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__lowerCAmelCase ) return dummy_data_list def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__lowerCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _lowercase ( self: Optional[Any] ): '''simple docstring''' pass def _lowercase ( self: Optional[int] ): '''simple docstring''' pass def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' def _iter_archive_members(__lowerCAmelCase: Any ): # this preserves the order of the members inside the ZIP archive _lowerCamelCase : Tuple = Path(self.dummy_file ).parent _lowerCamelCase : str = path.relative_to(__lowerCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _lowerCamelCase : Optional[int] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = Path(__lowerCAmelCase ) _lowerCamelCase : int = _iter_archive_members(__lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__lowerCAmelCase ).as_posix(), file_path.open("rb" ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : List[Any] = [paths] for path in paths: if os.path.isfile(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__lowerCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
340
1
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : int = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { '''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''', # See all SEW models at https://huggingface.co/models?filter=sew } class A_ ( _a ): lowerCAmelCase__ = 'sew' def __init__( self: Tuple ,__lowerCAmelCase: Optional[Any]=32 ,__lowerCAmelCase: int=768 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: Optional[int]=12 ,__lowerCAmelCase: int=3_072 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: Tuple="gelu" ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: Dict=0.1 ,__lowerCAmelCase: Any=0.1 ,__lowerCAmelCase: Tuple=0.0 ,__lowerCAmelCase: Any=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: int=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-5 ,__lowerCAmelCase: str="group" ,__lowerCAmelCase: Union[str, Any]="gelu" ,__lowerCAmelCase: int=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,__lowerCAmelCase: List[str]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,__lowerCAmelCase: List[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,__lowerCAmelCase: Union[str, Any]=False ,__lowerCAmelCase: List[Any]=128 ,__lowerCAmelCase: Optional[Any]=16 ,__lowerCAmelCase: int=True ,__lowerCAmelCase: List[str]=0.05 ,__lowerCAmelCase: Dict=10 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: Tuple=0.0 ,__lowerCAmelCase: str=10 ,__lowerCAmelCase: Tuple=0 ,__lowerCAmelCase: int="mean" ,__lowerCAmelCase: Tuple=False ,__lowerCAmelCase: str=False ,__lowerCAmelCase: List[Any]=256 ,__lowerCAmelCase: Dict=0 ,__lowerCAmelCase: Tuple=1 ,__lowerCAmelCase: Union[str, Any]=2 ,**__lowerCAmelCase: Tuple ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ,pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ) _lowerCamelCase : Any = hidden_size _lowerCamelCase : Optional[int] = feat_extract_norm _lowerCamelCase : Optional[int] = feat_extract_activation _lowerCamelCase : str = list(__lowerCAmelCase ) _lowerCamelCase : List[str] = list(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = list(__lowerCAmelCase ) _lowerCamelCase : Dict = conv_bias _lowerCamelCase : Optional[Any] = num_conv_pos_embeddings _lowerCamelCase : str = num_conv_pos_embedding_groups _lowerCamelCase : str = len(self.conv_dim ) _lowerCamelCase : List[str] = num_hidden_layers _lowerCamelCase : Any = intermediate_size _lowerCamelCase : Dict = squeeze_factor _lowerCamelCase : Optional[int] = hidden_act _lowerCamelCase : Dict = num_attention_heads _lowerCamelCase : Any = hidden_dropout _lowerCamelCase : Dict = attention_dropout _lowerCamelCase : Union[str, Any] = activation_dropout _lowerCamelCase : int = feat_proj_dropout _lowerCamelCase : List[Any] = final_dropout _lowerCamelCase : Any = layerdrop _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : List[str] = initializer_range _lowerCamelCase : Tuple = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." 
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCamelCase : Union[str, Any] = apply_spec_augment _lowerCamelCase : Tuple = mask_time_prob _lowerCamelCase : List[str] = mask_time_length _lowerCamelCase : Dict = mask_time_min_masks _lowerCamelCase : int = mask_feature_prob _lowerCamelCase : str = mask_feature_length _lowerCamelCase : int = mask_feature_min_masks # ctc loss _lowerCamelCase : List[Any] = ctc_loss_reduction _lowerCamelCase : str = ctc_zero_infinity # sequence classification _lowerCamelCase : Dict = use_weighted_layer_sum _lowerCamelCase : Optional[Any] = classifier_proj_size @property def _lowercase ( self: List[Any] ): '''simple docstring''' return functools.reduce(operator.mul ,self.conv_stride ,1 )
340
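The property at the end of the SEW config above (presumably the usual inputs-to-logits ratio) reduces conv_stride by multiplication; for the default strides that is just 5 * 2**6, as this standalone sketch checks:

import functools
import operator

strides = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul, strides, 1) == 320  # 5 * 2**6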
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise TypeError("Undefined for non-integers" ) elif precision < 1: raise ValueError("Undefined for non-natural numbers" ) _lowerCamelCase : int = precision _lowerCamelCase : Dict = ceil(precision / 14 ) _lowerCamelCase : Optional[Any] = 426880 * Decimal(10005 ).sqrt() _lowerCamelCase : int = 1 _lowerCamelCase : Optional[int] = 13591409 _lowerCamelCase : int = Decimal(_lowerCamelCase ) for k in range(1 , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowerCamelCase ) ** 3) linear_term += 545140134 exponential_term *= -262537412640768000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = 50 print(f'''The first {n} digits of pi is: {pi(n)}''')
340
1
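If the Chudnovsky reconstruction above is right, precision 10 needs only the k = 0 term (ceil(10 / 14) == 1, so the loop never runs) and still yields the first digits of pi, with the last untrusted digit dropped:

assert pi(10) == "3.14159265"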
"""simple docstring""" _lowerCAmelCase : Tuple = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Any = [False] * len(_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = [s] _lowerCamelCase : str = True while queue: _lowerCamelCase : Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_lowerCamelCase ) _lowerCamelCase : Any = True _lowerCamelCase : Any = u return visited[t] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : List[str] = [-1] * (len(_lowerCamelCase )) _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : List[str] = [i[:] for i in graph] # Record original cut, copy. while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Any = float("Inf" ) _lowerCamelCase : Dict = sink while s != source: # Find the minimum value in select path _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , graph[parent[s]][s] ) _lowerCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _lowerCamelCase : Optional[Any] = sink while v != source: _lowerCamelCase : Union[str, Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _lowerCamelCase : List[str] = parent[v] for i in range(len(_lowerCamelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
340
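Expected output for the max-flow/min-cut row above on its test graph (this is the classic CLRS example: max flow 23 saturates exactly these three cut edges). The copy keeps the assertion from mutating test_graph:

cut_edges = mincut([row[:] for row in test_graph], source=0, sink=5)
assert cut_edges == [(1, 3), (4, 3), (4, 5)]
assert sum(test_graph[u][v] for u, v in cut_edges) == 23  # min-cut capacity = max flow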
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class A_ ( _a ): lowerCAmelCase__ = 42 lowerCAmelCase__ = None def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=0.9_9_9 , _lowerCamelCase="cosine" , ) -> List[str]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(_lowerCamelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_lowerCamelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) _lowerCamelCase : str = [] for i in range(_lowerCamelCase ): _lowerCamelCase : Any = i / num_diffusion_timesteps _lowerCamelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) ) return torch.tensor(_lowerCamelCase , dtype=torch.floataa ) class A_ ( _a , _a ): @register_to_config def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,): '''simple docstring''' if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) _lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = 1.0 - self.betas _lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 ) _lowerCamelCase : int = torch.tensor(1.0 ) # standard deviation of the initial noise distribution _lowerCamelCase : Tuple = 1.0 # setable values _lowerCamelCase : List[Any] = None _lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() ) _lowerCamelCase : List[str] = variance_type def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ): '''simple docstring''' return sample def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ): '''simple docstring''' _lowerCamelCase : str = num_inference_steps _lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) _lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) _lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ): '''simple docstring''' if prev_timestep is None: _lowerCamelCase : List[str] = t - 1 _lowerCamelCase : Optional[int] = self.alphas_cumprod[t] _lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : List[Any] = self.betas[t] else: _lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from 
https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample _lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: _lowerCamelCase : List[str] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": _lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) ) _lowerCamelCase : str = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler _lowerCamelCase : str = variance.log() _lowerCamelCase : str = beta.log() _lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2 _lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,): '''simple docstring''' _lowerCamelCase : str = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": _lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 ) else: _lowerCamelCase : List[Any] = None # 1. compute alphas, betas if prev_timestep is None: _lowerCamelCase : List[Any] = t - 1 _lowerCamelCase : Dict = self.alphas_cumprod[t] _lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : List[str] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : Any = self.betas[t] _lowerCamelCase : str = self.alphas[t] else: _lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev _lowerCamelCase : Optional[Any] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": _lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": _lowerCamelCase : List[Any] = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: _lowerCamelCase : Any = torch.clamp( __lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t _lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _lowerCamelCase : Union[str, Any] = 0 if t > 0: _lowerCamelCase : Dict = randn_tensor( model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device ) _lowerCamelCase : Any = self._get_variance( __lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,) if self.variance_type == "fixed_small_log": _lowerCamelCase : Optional[Any] = variance elif self.variance_type == "learned_range": _lowerCamelCase : Optional[int] = (0.5 * variance).exp() else: raise ValueError( F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" " for the UnCLIPScheduler." ) _lowerCamelCase : Dict = variance * variance_noise _lowerCamelCase : List[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.IntTensor ,): '''simple docstring''' _lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype ) _lowerCamelCase : Any = timesteps.to(original_samples.device ) _lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5 _lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 _lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
340
1
"""simple docstring""" from __future__ import annotations def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]: '''simple docstring''' if nth_term == "": return [""] _lowerCamelCase : List[str] = int(_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = int(_lowerCamelCase ) _lowerCamelCase : list[str] = [] for temp in range(int(_lowerCamelCase ) ): series.append(F"""1 / {pow(temp + 1 , int(_lowerCamelCase ) )}""" if series else "1" ) return series if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase : int = int(input('''Enter the last number (nth term) of the P-Series''')) _lowerCAmelCase : List[str] = int(input('''Enter the power for P-Series''')) print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''') print(p_series(nth_term, power))
340
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Dict = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : str = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' for attribute in key.split("." ): _lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: _lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: _lowerCamelCase : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowerCamelCase : Tuple = value elif weight_type == "weight_g": _lowerCamelCase : List[str] = value elif weight_type == "weight_v": _lowerCamelCase : List[Any] = value elif weight_type == "bias": _lowerCamelCase : str = value elif weight_type == "running_mean": _lowerCamelCase : Optional[int] = value elif weight_type == "running_var": _lowerCamelCase : Optional[Any] = value elif weight_type == "num_batches_tracked": _lowerCamelCase : int = value elif weight_type == "inv_freq": _lowerCamelCase : List[str] = value else: _lowerCamelCase : Optional[Any] = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Dict = [] _lowerCamelCase : Optional[Any] = fairseq_model.state_dict() _lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): _lowerCamelCase : Dict = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , ) _lowerCamelCase : List[Any] = True else: for key, mapped_key in MAPPING.items(): _lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _lowerCamelCase : int = True if "*" in mapped_key: _lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2] _lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase ) if "pos_bias_u" in name: _lowerCamelCase : int = None elif "pos_bias_v" in name: _lowerCamelCase : Any = None elif "weight_g" in name: _lowerCamelCase : Any = "weight_g" elif "weight_v" in name: _lowerCamelCase : Any = "weight_v" elif "bias" in name: _lowerCamelCase : Optional[Any] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj _lowerCamelCase : Dict = "weight" elif "running_mean" in name: _lowerCamelCase : str = "running_mean" elif "inv_freq" in name: _lowerCamelCase : List[Any] = "inv_freq" elif "running_var" in name: _lowerCamelCase : Tuple = "running_var" elif "num_batches_tracked" in name: _lowerCamelCase : str = "num_batches_tracked" else: _lowerCamelCase : Dict = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : int = full_name.split("conv_layers." )[-1] _lowerCamelCase : List[Any] = name.split("." 
) _lowerCamelCase : Union[str, Any] = int(items[0] ) _lowerCamelCase : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowerCamelCase : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowerCamelCase : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) _lowerCamelCase : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowerCamelCase : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCamelCase ) @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict: '''simple docstring''' if config_path is not None: _lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" ) else: _lowerCamelCase : Dict = WavaVecaConformerConfig() if "rope" in checkpoint_path: _lowerCamelCase : List[Any] = "rotary" if is_finetuned: if dict_path: _lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowerCamelCase : Optional[int] = target_dict.pad_index _lowerCamelCase : Dict = target_dict.bos_index _lowerCamelCase : Optional[Any] = target_dict.eos_index _lowerCamelCase : str = len(target_dict.symbols ) _lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" ) if not os.path.isdir(_lowerCamelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) _lowerCamelCase : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched _lowerCamelCase : List[str] = 0 _lowerCamelCase : List[Any] = 1 with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , ) _lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False _lowerCamelCase : 
Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) _lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) _lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase ) else: _lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase ) if is_finetuned: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" ) _lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase ) _lowerCamelCase : Dict = model[0].eval() recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned ) hf_wavavec.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) _lowerCAmelCase : str = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
340
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : str = logging.get_logger(__name__) _lowerCAmelCase : Tuple = { '''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class A_ ( _a ): lowerCAmelCase__ = 'donut-swin' lowerCAmelCase__ = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self: Dict ,__lowerCAmelCase: Optional[int]=224 ,__lowerCAmelCase: List[str]=4 ,__lowerCAmelCase: Any=3 ,__lowerCAmelCase: Tuple=96 ,__lowerCAmelCase: Optional[int]=[2, 2, 6, 2] ,__lowerCAmelCase: Any=[3, 6, 12, 24] ,__lowerCAmelCase: Optional[Any]=7 ,__lowerCAmelCase: List[str]=4.0 ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: Tuple=0.0 ,__lowerCAmelCase: Optional[int]=0.0 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: Optional[int]="gelu" ,__lowerCAmelCase: Any=False ,__lowerCAmelCase: int=0.02 ,__lowerCAmelCase: Tuple=1e-5 ,**__lowerCAmelCase: Optional[Any] ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ) _lowerCamelCase : List[Any] = image_size _lowerCamelCase : Tuple = patch_size _lowerCamelCase : Dict = num_channels _lowerCamelCase : List[Any] = embed_dim _lowerCamelCase : List[str] = depths _lowerCamelCase : Dict = len(__lowerCAmelCase ) _lowerCamelCase : int = num_heads _lowerCamelCase : List[Any] = window_size _lowerCamelCase : Optional[Any] = mlp_ratio _lowerCamelCase : List[str] = qkv_bias _lowerCamelCase : List[Any] = hidden_dropout_prob _lowerCamelCase : Any = attention_probs_dropout_prob _lowerCamelCase : Tuple = drop_path_rate _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : List[str] = use_absolute_embeddings _lowerCamelCase : Any = layer_norm_eps _lowerCamelCase : Any = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCamelCase : int = int(embed_dim * 2 ** (len(__lowerCAmelCase ) - 1) )
340
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(a - b ) for a, b in zip(_lowerCamelCase , _lowerCamelCase ) ) ) def lowerCamelCase_( _lowerCamelCase ) -> None: '''simple docstring''' if point: if isinstance(_lowerCamelCase , _lowerCamelCase ): for item in point: if not isinstance(_lowerCamelCase , (int, float) ): _lowerCamelCase : Dict = ( "Expected a list of numbers as input, found " F"""{type(_lowerCamelCase ).__name__}""" ) raise TypeError(_lowerCamelCase ) else: _lowerCamelCase : Optional[int] = F"""Expected a list of numbers as input, found {type(_lowerCamelCase ).__name__}""" raise TypeError(_lowerCamelCase ) else: raise ValueError("Missing an input" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(x - y ) for x, y in zip(_lowerCamelCase , _lowerCamelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
340
1
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase = 1000 ) -> int: '''simple docstring''' _lowerCamelCase : Tuple = 3 _lowerCamelCase : List[Any] = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 15 == 0: result -= a a += 1 return result if __name__ == "__main__": print(f'''{solution() = }''')
340
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Optional[Any] = np.inf def set_batch_size(_lowerCamelCase ) -> None: nonlocal batch_size if isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Optional[int] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ) and feature.dtype == "binary": _lowerCamelCase : List[str] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_lowerCamelCase , _lowerCamelCase ) return None if batch_size is np.inf else batch_size class A_ ( _a ): def __init__( self: Optional[int] ,__lowerCAmelCase: NestedDataStructureLike[PathLike] ,__lowerCAmelCase: Optional[NamedSplit] = None ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: int ,): '''simple docstring''' super().__init__( __lowerCAmelCase ,split=__lowerCAmelCase ,features=__lowerCAmelCase ,cache_dir=__lowerCAmelCase ,keep_in_memory=__lowerCAmelCase ,streaming=__lowerCAmelCase ,num_proc=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : Tuple = path_or_paths if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else {self.split: path_or_paths} _lowerCamelCase : Any = _PACKAGED_DATASETS_MODULES["parquet"][1] _lowerCamelCase : int = Parquet( cache_dir=__lowerCAmelCase ,data_files=__lowerCAmelCase ,features=__lowerCAmelCase ,hash=__lowerCAmelCase ,**__lowerCAmelCase ,) def _lowercase ( self: Optional[int] ): '''simple docstring''' if self.streaming: _lowerCamelCase : List[Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _lowerCamelCase : Tuple = None _lowerCamelCase : Optional[int] = None _lowerCamelCase : List[str] = None _lowerCamelCase : str = None self.builder.download_and_prepare( download_config=__lowerCAmelCase ,download_mode=__lowerCAmelCase ,verification_mode=__lowerCAmelCase ,base_path=__lowerCAmelCase ,num_proc=self.num_proc ,) _lowerCamelCase : Any = self.builder.as_dataset( split=self.split ,verification_mode=__lowerCAmelCase ,in_memory=self.keep_in_memory ) return dataset class A_ : def __init__( self: str ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: Union[PathLike, BinaryIO] ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: List[Any] ,): '''simple docstring''' _lowerCamelCase : Any = dataset _lowerCamelCase : Any = path_or_buf _lowerCamelCase : Any = batch_size or get_writer_batch_size(dataset.features ) _lowerCamelCase : List[str] = parquet_writer_kwargs def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf ,(str, 
bytes, os.PathLike) ): with open(self.path_or_buf ,"wb+" ) as buffer: _lowerCamelCase : str = self._write(file_obj=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) else: _lowerCamelCase : Optional[int] = self._write(file_obj=self.path_or_buf ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) return written def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: BinaryIO ,__lowerCAmelCase: int ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : List[str] = 0 _lowerCamelCase : Optional[int] = parquet_writer_kwargs.pop("path_or_buf" ,__lowerCAmelCase ) _lowerCamelCase : List[str] = self.dataset.features.arrow_schema _lowerCamelCase : str = pq.ParquetWriter(__lowerCAmelCase ,schema=__lowerCAmelCase ,**__lowerCAmelCase ) for offset in logging.tqdm( range(0 ,len(self.dataset ) ,__lowerCAmelCase ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating parquet from Arrow format" ,): _lowerCamelCase : List[str] = query_table( table=self.dataset._data ,key=slice(__lowerCAmelCase ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,) writer.write_table(__lowerCAmelCase ) written += batch.nbytes writer.close() return written
340
1
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator class A_ : def __init__( self: Union[str, Any] ,__lowerCAmelCase: int ): '''simple docstring''' _lowerCamelCase : List[Any] = value _lowerCamelCase : Node | None = None _lowerCamelCase : Node | None = None class A_ : def __init__( self: Tuple ,__lowerCAmelCase: Node ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = tree def _lowercase ( self: int ,__lowerCAmelCase: Node | None ): '''simple docstring''' if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self: Any ): '''simple docstring''' yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
340
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Optional[int] = {} _lowerCamelCase : Optional[int] = tokenizer(example["content"] , truncation=_lowerCamelCase )["input_ids"] _lowerCamelCase : Dict = len(example["content"] ) / len(output["input_ids"] ) return output _lowerCAmelCase : Tuple = HfArgumentParser(PretokenizationArguments) _lowerCAmelCase : Optional[int] = parser.parse_args() if args.num_workers is None: _lowerCAmelCase : Any = multiprocessing.cpu_count() _lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir) _lowerCAmelCase : Union[str, Any] = time.time() _lowerCAmelCase : Optional[int] = load_dataset(args.dataset_name, split='''train''') print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') _lowerCAmelCase : Any = time.time() _lowerCAmelCase : Dict = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ '''repo_name''', '''path''', '''copies''', '''size''', '''content''', '''license''', '''hash''', '''line_mean''', '''line_max''', '''alpha_frac''', '''autogenerated''', ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') _lowerCAmelCase : str = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
340
1
"""simple docstring""" from __future__ import annotations _lowerCAmelCase : Union[str, Any] = 8.988e9 # units = N * m^s * C^-2 def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> dict[str, float]: '''simple docstring''' _lowerCamelCase : Tuple = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if distance < 0: raise ValueError("Distance cannot be negative" ) if force == 0: _lowerCamelCase : int = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: _lowerCamelCase : List[Any] = abs(_lowerCamelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: _lowerCamelCase : str = abs(_lowerCamelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: _lowerCamelCase : Any = (COULOMBS_CONSTANT * charge_product / abs(_lowerCamelCase )) ** 0.5 return {"distance": distance} raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
340
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Optional[Any] = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json''' ), } class A_ ( _a ): lowerCAmelCase__ = 'xlm-roberta' def __init__( self: List[Any] ,__lowerCAmelCase: int=30_522 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Optional[Any]=12 ,__lowerCAmelCase: str=12 ,__lowerCAmelCase: Optional[Any]=3_072 ,__lowerCAmelCase: str="gelu" ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: Tuple=0.1 ,__lowerCAmelCase: str=512 ,__lowerCAmelCase: Any=2 ,__lowerCAmelCase: Any=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: str=1 ,__lowerCAmelCase: Optional[Any]=0 ,__lowerCAmelCase: Dict=2 ,__lowerCAmelCase: int="absolute" ,__lowerCAmelCase: Optional[int]=True ,__lowerCAmelCase: List[str]=None ,**__lowerCAmelCase: Tuple ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = vocab_size _lowerCamelCase : Optional[int] = hidden_size _lowerCamelCase : Optional[int] = num_hidden_layers _lowerCamelCase : Any = num_attention_heads _lowerCamelCase : Optional[Any] = hidden_act _lowerCamelCase : Optional[int] = intermediate_size _lowerCamelCase : Optional[int] = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : List[str] = max_position_embeddings _lowerCamelCase : Union[str, Any] = type_vocab_size _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : Optional[Any] = layer_norm_eps _lowerCamelCase : Optional[Any] = position_embedding_type _lowerCamelCase : Union[str, Any] = use_cache _lowerCamelCase : List[Any] = classifier_dropout class A_ ( _a ): @property def _lowercase ( self: Tuple ): '''simple docstring''' if self.task == "multiple-choice": _lowerCamelCase : List[Any] = {0: "batch", 1: "choice", 2: "sequence"} else: _lowerCamelCase : List[Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
340
"""simple docstring""" import logging from transformers.configuration_utils import PretrainedConfig _lowerCAmelCase : Optional[Any] = logging.getLogger(__name__) class A_ ( _a ): lowerCAmelCase__ = 'masked_bert' def __init__( self: Union[str, Any] ,__lowerCAmelCase: Dict=30_522 ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Dict=12 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: List[Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: str=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-12 ,__lowerCAmelCase: Union[str, Any]=0 ,__lowerCAmelCase: List[Any]="topK" ,__lowerCAmelCase: Optional[Any]="constant" ,__lowerCAmelCase: Optional[Any]=0.0 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = hidden_size _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : Tuple = num_attention_heads _lowerCamelCase : Optional[Any] = hidden_act _lowerCamelCase : Optional[Any] = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : str = max_position_embeddings _lowerCamelCase : List[str] = type_vocab_size _lowerCamelCase : Optional[int] = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : int = pruning_method _lowerCamelCase : str = mask_init _lowerCamelCase : List[Any] = mask_scale
340
1
"""simple docstring""" from __future__ import annotations def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> None: '''simple docstring''' _lowerCamelCase : List[str] = len(_lowerCamelCase ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(_lowerCamelCase ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _lowerCamelCase , _lowerCamelCase , ) def lowerCamelCase_( _lowerCamelCase ) -> None: '''simple docstring''' _lowerCamelCase : list[list[str]] = [] depth_first_search([] , [] , [] , _lowerCamelCase , _lowerCamelCase ) # Print all the boards for board in boards: for column in board: print(_lowerCamelCase ) print("" ) print(len(_lowerCamelCase ) , "solutions were found." ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
340
"""simple docstring""" import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model _lowerCAmelCase : str = '''0.12''' # assumed parallelism: 8 if is_torch_available(): import torch def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) -> List[Any]: '''simple docstring''' if rng is None: _lowerCamelCase : Union[str, Any] = random.Random() _lowerCamelCase : Union[str, Any] = 1 for dim in shape: total_dims *= dim _lowerCamelCase : Optional[int] = [] for _ in range(_lowerCamelCase ): values.append(rng.randint(0 , vocab_size - 1 ) ) _lowerCamelCase : Union[str, Any] = np.array(_lowerCamelCase , dtype=jnp.intaa ).reshape(_lowerCamelCase ) return output def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Optional[int] = ids_tensor(_lowerCamelCase , vocab_size=2 , rng=_lowerCamelCase ) # make sure that at least one token is attended to for each batch _lowerCamelCase : List[str] = 1 return attn_mask @require_flax class A_ : lowerCAmelCase__ = None lowerCAmelCase__ = () def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 _lowerCamelCase : List[str] = 2 _lowerCamelCase : str = inputs["input_ids"].shape[-1] // 2 _lowerCamelCase : Tuple = inputs["input_ids"][:max_batch_size, :sequence_length] _lowerCamelCase : Any = jnp.ones_like(__lowerCAmelCase ) _lowerCamelCase : List[Any] = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens _lowerCamelCase : Optional[Any] = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` _lowerCamelCase : List[str] = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = False _lowerCamelCase : Dict = max_length _lowerCamelCase : Tuple = 0 for model_class in self.all_generative_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning _lowerCamelCase : Any = getattr(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Dict = pt_model_class(__lowerCAmelCase ).eval() _lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCAmelCase ,flax_model.params ) _lowerCamelCase : int = flax_model.generate(__lowerCAmelCase ).sequences _lowerCamelCase : Optional[int] = pt_model.generate(torch.tensor(__lowerCAmelCase ,dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: _lowerCamelCase : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, 
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = self._get_input_ids_and_config() _lowerCamelCase : Union[str, Any] = False _lowerCamelCase : Union[str, Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = True _lowerCamelCase : Optional[int] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : int = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : int = False _lowerCamelCase : Optional[Any] = max_length _lowerCamelCase : Dict = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[str] = model_class(__lowerCAmelCase ) _lowerCamelCase : Dict = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = self._get_input_ids_and_config() _lowerCamelCase : Tuple = False _lowerCamelCase : Union[str, Any] = max_length _lowerCamelCase : List[str] = 2 _lowerCamelCase : Optional[int] = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : str = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() _lowerCamelCase : int = True _lowerCamelCase : List[Any] = max_length _lowerCamelCase : Optional[Any] = 0.8 _lowerCamelCase : Union[str, Any] = 10 _lowerCamelCase : List[str] = 0.3 _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : str = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : int = jit(model.generate ) _lowerCamelCase : Optional[int] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() 
,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[str] = max_length _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : Dict = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Any = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : Any = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() _lowerCamelCase : Dict = max_length _lowerCamelCase : List[Any] = 2 _lowerCamelCase : Tuple = 1 _lowerCamelCase : List[str] = 8 _lowerCamelCase : List[Any] = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : int = model_class(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : Optional[Any] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Tuple = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : Dict = False _lowerCamelCase : Any = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[Any] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : List[str] = 
attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : int = 2 _lowerCamelCase : int = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : Dict = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) @require_flax class A_ ( unittest.TestCase ): def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" ) _lowerCamelCase : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) _lowerCamelCase : Optional[Any] = "Hello world" _lowerCamelCase : str = tokenizer(__lowerCAmelCase ,return_tensors="np" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__lowerCAmelCase ,"do_samples" ): model.generate(__lowerCAmelCase ,do_samples=__lowerCAmelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__lowerCAmelCase ,"foo" ): _lowerCamelCase : List[str] = {"foo": "bar"} model.generate(__lowerCAmelCase ,**__lowerCAmelCase )
340
1
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class A_ ( unittest.TestCase ): def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : Dict = tempfile.mkdtemp() # fmt: off _lowerCamelCase : Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"] # fmt: on _lowerCamelCase : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) _lowerCamelCase : str = { "do_resize": True, "size": {"height": 18, "width": 18}, "do_normalize": True, "image_mean": [0.5, 0.5, 0.5], "image_std": [0.5, 0.5, 0.5], } _lowerCamelCase : Tuple = os.path.join(self.tmpdirname ,__lowerCAmelCase ) with open(self.image_processor_file ,"w" ,encoding="utf-8" ) as fp: json.dump(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: List[Any] ,**__lowerCAmelCase: Union[str, Any] ): '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname ,**__lowerCAmelCase ) def _lowercase ( self: Tuple ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__lowerCAmelCase ) def _lowercase ( self: Optional[int] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] _lowerCamelCase : Optional[int] = [Image.fromarray(np.moveaxis(__lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Dict = self.get_tokenizer() _lowerCamelCase : List[Any] = self.get_image_processor() _lowerCamelCase : Dict = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) _lowerCamelCase : List[str] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer ,(BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor ,__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Optional[int] = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _lowerCamelCase : Optional[int] = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" ) _lowerCamelCase : str = self.get_image_processor(do_normalize=__lowerCAmelCase ,padding_value=1.0 ) _lowerCamelCase : str = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=__lowerCAmelCase ,padding_value=1.0 ) 
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,(BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__lowerCAmelCase ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : Any = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : Tuple = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) _lowerCamelCase : Dict = self.prepare_image_inputs() _lowerCamelCase : Tuple = image_processor(__lowerCAmelCase ,return_tensors="np" ) _lowerCamelCase : Optional[Any] = processor(images=__lowerCAmelCase ,return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Optional[Any] = self.get_image_processor() _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : Any = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) _lowerCamelCase : Tuple = "lower newer" _lowerCamelCase : Any = processor(text=__lowerCAmelCase ) _lowerCamelCase : List[str] = tokenizer(__lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Any = self.get_image_processor() _lowerCamelCase : Union[str, Any] = self.get_tokenizer() _lowerCamelCase : Tuple = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) _lowerCamelCase : Any = "lower newer" _lowerCamelCase : List[str] = self.prepare_image_inputs() _lowerCamelCase : Union[str, Any] = processor(text=__lowerCAmelCase ,images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,["input_ids", "token_type_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with self.assertRaises(__lowerCAmelCase ): processor() def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : Optional[int] = self.get_image_processor() _lowerCamelCase : Optional[int] = self.get_tokenizer() _lowerCamelCase : Any = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) _lowerCamelCase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowerCamelCase : Optional[Any] = processor.batch_decode(__lowerCAmelCase ) _lowerCamelCase : str = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = "lower newer" _lowerCamelCase : Tuple = self.prepare_image_inputs() _lowerCamelCase : str = processor(text=__lowerCAmelCase ,images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : int = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class A_ ( _a ): lowerCAmelCase__ = 'mobilenet_v1' def __init__( self: Tuple ,__lowerCAmelCase: int=3 ,__lowerCAmelCase: Dict=224 ,__lowerCAmelCase: int=1.0 ,__lowerCAmelCase: Tuple=8 ,__lowerCAmelCase: List[str]="relu6" ,__lowerCAmelCase: int=True ,__lowerCAmelCase: List[Any]=0.9_99 ,__lowerCAmelCase: Optional[int]=0.02 ,__lowerCAmelCase: Optional[int]=0.0_01 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) _lowerCamelCase : List[str] = num_channels _lowerCamelCase : Union[str, Any] = image_size _lowerCamelCase : List[Any] = depth_multiplier _lowerCamelCase : Any = min_depth _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Dict = tf_padding _lowerCamelCase : Union[str, Any] = classifier_dropout_prob _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps class A_ ( _a ): lowerCAmelCase__ = version.parse('1.11' ) @property def _lowercase ( self: Optional[int] ): '''simple docstring''' return OrderedDict([("pixel_values", {0: "batch"})] ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def _lowercase ( self: Any ): '''simple docstring''' return 1e-4
340
1
"""simple docstring""" import argparse import copy def lowerCamelCase_( _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Dict = {} with open(_lowerCamelCase ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: _lowerCamelCase : str = [] _list.append([line.split()[1], line.split()[2]] ) _lowerCamelCase : List[str] = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: _lowerCamelCase : Dict = [] _list.append([line.split()[0], line.split()[2]] ) _lowerCamelCase : Any = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' with open(_lowerCamelCase ) as f: _lowerCamelCase : Tuple = f.read(1 ) _lowerCamelCase : int = start_node _lowerCamelCase : Tuple = [] _lowerCamelCase : Optional[Any] = start_node _lowerCamelCase : Any = 0 while visiting not in first_solution: _lowerCamelCase : int = 10000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(_lowerCamelCase ) and k[0] not in first_solution: _lowerCamelCase : Optional[Any] = k[1] _lowerCamelCase : Optional[int] = k[0] first_solution.append(_lowerCamelCase ) _lowerCamelCase : Dict = distance_of_first_solution + int(_lowerCamelCase ) _lowerCamelCase : List[str] = best_node first_solution.append(_lowerCamelCase ) _lowerCamelCase : Tuple = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 _lowerCamelCase : List[str] = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10000 ) return first_solution, distance_of_first_solution def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Union[str, Any] = [] for n in solution[1:-1]: _lowerCamelCase : Dict = solution.index(_lowerCamelCase ) for kn in solution[1:-1]: _lowerCamelCase : str = solution.index(_lowerCamelCase ) if n == kn: continue _lowerCamelCase : int = copy.deepcopy(_lowerCamelCase ) _lowerCamelCase : Optional[int] = kn _lowerCamelCase : int = n _lowerCamelCase : Dict = 0 for k in _tmp[:-1]: _lowerCamelCase : Optional[int] = _tmp[_tmp.index(_lowerCamelCase ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: _lowerCamelCase : Union[str, Any] = distance + int(i[1] ) _tmp.append(_lowerCamelCase ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) _lowerCamelCase : str = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda _lowerCamelCase : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Tuple = 1 _lowerCamelCase : int = first_solution _lowerCamelCase : str = [] _lowerCamelCase : Tuple = distance_of_first_solution _lowerCamelCase : List[Any] = solution while count <= iters: _lowerCamelCase : Optional[int] = find_neighborhood(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : str = 0 _lowerCamelCase : Dict = neighborhood[index_of_best_solution] _lowerCamelCase : Optional[Any] = len(_lowerCamelCase ) - 1 _lowerCamelCase : List[str] = False while not found: _lowerCamelCase : Dict = 0 while i < len(_lowerCamelCase ): if best_solution[i] != solution[i]: _lowerCamelCase : Any = best_solution[i] _lowerCamelCase : List[Any] = 
solution[i] break _lowerCamelCase : List[str] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) _lowerCamelCase : List[Any] = True _lowerCamelCase : Optional[int] = best_solution[:-1] _lowerCamelCase : Optional[Any] = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: _lowerCamelCase : Optional[Any] = cost _lowerCamelCase : List[str] = solution else: _lowerCamelCase : Tuple = index_of_best_solution + 1 _lowerCamelCase : List[str] = neighborhood[index_of_best_solution] if len(_lowerCamelCase ) >= size: tabu_list.pop(0 ) _lowerCamelCase : Optional[int] = count + 1 return best_solution_ever, best_cost def lowerCamelCase_( _lowerCamelCase=None ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Optional[int] = generate_neighbours(args.File ) _lowerCamelCase, _lowerCamelCase : int = generate_first_solution( args.File , _lowerCamelCase ) _lowerCamelCase, _lowerCamelCase : Optional[int] = tabu_search( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , args.Iterations , args.Size , ) print(F"""Best solution: {best_sol}, with total distance: {best_cost}.""" ) if __name__ == "__main__": _lowerCAmelCase : List[str] = argparse.ArgumentParser(description='''Tabu Search''') parser.add_argument( '''-f''', '''--File''', type=str, help='''Path to the file containing the data''', required=True, ) parser.add_argument( '''-i''', '''--Iterations''', type=int, help='''How many iterations the algorithm should perform''', required=True, ) parser.add_argument( '''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True ) # Pass the arguments to main method main(parser.parse_args())
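A quick usage sketch for the script above: the `-f`/`-i`/`-s` flags come straight from its argparse block, while the script name, the file name `cities.txt`, and the three-column input format are assumptions inferred from how the neighbour dictionary splits each line into two node labels and a distance.

#   python tabu_search.py -f cities.txt -i 100 -s 5
# where every line of cities.txt describes one undirected edge as
# "<node> <node> <distance>", e.g.
#   a b 20
#   a c 18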
340
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def lowerCamelCase_( _lowerCamelCase ) -> Any: '''simple docstring''' for param in module.parameters(): _lowerCamelCase : Optional[int] = False def lowerCamelCase_( ) -> List[str]: '''simple docstring''' _lowerCamelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _lowerCamelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Dict = plt.imshow(_lowerCamelCase ) fig.axes.get_xaxis().set_visible(_lowerCamelCase ) fig.axes.get_yaxis().set_visible(_lowerCamelCase ) plt.show() def lowerCamelCase_( ) -> str: '''simple docstring''' _lowerCamelCase : Tuple = datetime.now() _lowerCamelCase : Tuple = current_time.strftime("%H:%M:%S" ) return timestamp
340
1
"""simple docstring""" import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = { '''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''', '''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''', '''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''', } class A_ ( _a ): lowerCAmelCase__ = 'owlvit_text_model' def __init__( self: Any ,__lowerCAmelCase: List[Any]=49_408 ,__lowerCAmelCase: Any=512 ,__lowerCAmelCase: Tuple=2_048 ,__lowerCAmelCase: str=12 ,__lowerCAmelCase: Any=8 ,__lowerCAmelCase: List[str]=16 ,__lowerCAmelCase: int="quick_gelu" ,__lowerCAmelCase: List[Any]=1e-5 ,__lowerCAmelCase: Any=0.0 ,__lowerCAmelCase: List[Any]=0.02 ,__lowerCAmelCase: int=1.0 ,__lowerCAmelCase: Tuple=0 ,__lowerCAmelCase: Any=49_406 ,__lowerCAmelCase: List[str]=49_407 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : str = hidden_size _lowerCamelCase : int = intermediate_size _lowerCamelCase : Optional[int] = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : List[str] = max_position_embeddings _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Any = layer_norm_eps _lowerCamelCase : List[str] = attention_dropout _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : List[Any] = initializer_factor @classmethod def _lowercase ( cls: Dict ,__lowerCAmelCase: Union[str, os.PathLike] ,**__lowerCAmelCase: Tuple ): '''simple docstring''' cls._set_token_in_kwargs(__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : List[str] = cls.get_config_dict(__lowerCAmelCase ,**__lowerCAmelCase ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": _lowerCamelCase : Any = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls ,"model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowerCAmelCase ,**__lowerCAmelCase ) class A_ ( _a ): lowerCAmelCase__ = 'owlvit_vision_model' def __init__( self: int ,__lowerCAmelCase: Union[str, Any]=768 ,__lowerCAmelCase: Tuple=3_072 ,__lowerCAmelCase: Optional[int]=12 ,__lowerCAmelCase: Dict=12 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: List[Any]=768 ,__lowerCAmelCase: Optional[int]=32 ,__lowerCAmelCase: Optional[Any]="quick_gelu" ,__lowerCAmelCase: Union[str, Any]=1e-5 ,__lowerCAmelCase: List[Any]=0.0 ,__lowerCAmelCase: Optional[int]=0.02 ,__lowerCAmelCase: int=1.0 ,**__lowerCAmelCase: Tuple ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ) _lowerCamelCase : int = hidden_size _lowerCamelCase : Tuple = intermediate_size _lowerCamelCase : List[Any] = num_hidden_layers _lowerCamelCase : Union[str, Any] = num_attention_heads _lowerCamelCase : Tuple = num_channels _lowerCamelCase : Optional[int] = image_size _lowerCamelCase : Any = patch_size _lowerCamelCase : str = hidden_act _lowerCamelCase : Dict = layer_norm_eps _lowerCamelCase : int = attention_dropout _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Optional[int] = initializer_factor @classmethod def _lowercase ( cls: Dict ,__lowerCAmelCase: Union[str, os.PathLike] ,**__lowerCAmelCase: List[str] ): '''simple docstring''' cls._set_token_in_kwargs(__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : List[str] = cls.get_config_dict(__lowerCAmelCase ,**__lowerCAmelCase ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": _lowerCamelCase : List[Any] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls ,"model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowerCAmelCase ,**__lowerCAmelCase ) class A_ ( _a ): lowerCAmelCase__ = 'owlvit' lowerCAmelCase__ = True def __init__( self: str ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: Dict=None ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: List[Any]=2.65_92 ,__lowerCAmelCase: Optional[int]=True ,**__lowerCAmelCase: List[str] ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ) if text_config is None: _lowerCamelCase : Optional[int] = {} logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." ) if vision_config is None: _lowerCamelCase : int = {} logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." 
) _lowerCamelCase : List[str] = OwlViTTextConfig(**__lowerCAmelCase ) _lowerCamelCase : List[str] = OwlViTVisionConfig(**__lowerCAmelCase ) _lowerCamelCase : Optional[int] = projection_dim _lowerCamelCase : Dict = logit_scale_init_value _lowerCamelCase : Optional[int] = return_dict _lowerCamelCase : Any = 1.0 @classmethod def _lowercase ( cls: Union[str, Any] ,__lowerCAmelCase: Union[str, os.PathLike] ,**__lowerCAmelCase: Any ): '''simple docstring''' cls._set_token_in_kwargs(__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : Any = cls.get_config_dict(__lowerCAmelCase ,**__lowerCAmelCase ) if "model_type" in config_dict and hasattr(cls ,"model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowerCAmelCase ,**__lowerCAmelCase ) @classmethod def _lowercase ( cls: Dict ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Dict ,**__lowerCAmelCase: List[str] ): '''simple docstring''' _lowerCamelCase : Any = {} _lowerCamelCase : int = text_config _lowerCamelCase : Union[str, Any] = vision_config return cls.from_dict(__lowerCAmelCase ,**__lowerCAmelCase ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : int = copy.deepcopy(self.__dict__ ) _lowerCamelCase : Optional[Any] = self.text_config.to_dict() _lowerCamelCase : str = self.vision_config.to_dict() _lowerCamelCase : List[Any] = self.__class__.model_type return output class A_ ( _a ): @property def _lowercase ( self: Any ): '''simple docstring''' return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("attention_mask", {0: "batch", 1: "sequence"}), ] ) @property def _lowercase ( self: List[Any] ): '''simple docstring''' return OrderedDict( [ ("logits_per_image", {0: "batch"}), ("logits_per_text", {0: "batch"}), ("text_embeds", {0: "batch"}), ("image_embeds", {0: "batch"}), ] ) @property def _lowercase ( self: str ): '''simple docstring''' return 1e-4 def _lowercase ( self: Any ,__lowerCAmelCase: "ProcessorMixin" ,__lowerCAmelCase: int = -1 ,__lowerCAmelCase: int = -1 ,__lowerCAmelCase: Optional["TensorType"] = None ,): '''simple docstring''' _lowerCamelCase : int = super().generate_dummy_inputs( processor.tokenizer ,batch_size=__lowerCAmelCase ,seq_length=__lowerCAmelCase ,framework=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = super().generate_dummy_inputs( processor.image_processor ,batch_size=__lowerCAmelCase ,framework=__lowerCAmelCase ) return {**text_input_dict, **image_input_dict} @property def _lowercase ( self: Dict ): '''simple docstring''' return 14
340
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = ( first_str_length if first_str_length > second_str_length else second_str_length ) _lowerCamelCase : list = [] for char_count in range(_lowerCamelCase ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(_lowerCamelCase ) if __name__ == "__main__": print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
340
1
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) class A_ ( _a ): def __init__( self: List[Any] ,__lowerCAmelCase: Union[List[ControlNetModel], Tuple[ControlNetModel]] ): '''simple docstring''' super().__init__() _lowerCamelCase : Tuple = nn.ModuleList(__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Union[torch.Tensor, float, int] ,__lowerCAmelCase: torch.Tensor ,__lowerCAmelCase: List[torch.tensor] ,__lowerCAmelCase: List[float] ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[Dict[str, Any]] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(__lowerCAmelCase ,__lowerCAmelCase ,self.nets ) ): _lowerCamelCase, _lowerCamelCase : Union[str, Any] = controlnet( __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,) # merge samples if i == 0: _lowerCamelCase, _lowerCamelCase : Optional[Any] = down_samples, mid_sample else: _lowerCamelCase : Optional[int] = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__lowerCAmelCase ,__lowerCAmelCase ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Union[str, os.PathLike] ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Callable = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[str] = None ,): '''simple docstring''' _lowerCamelCase : List[Any] = 0 _lowerCamelCase : str = save_directory for controlnet in self.nets: controlnet.save_pretrained( __lowerCAmelCase ,is_main_process=__lowerCAmelCase ,save_function=__lowerCAmelCase ,safe_serialization=__lowerCAmelCase ,variant=__lowerCAmelCase ,) idx += 1 _lowerCamelCase : int = model_path_to_save + F"""_{idx}""" @classmethod def _lowercase ( cls: Any ,__lowerCAmelCase: Optional[Union[str, os.PathLike]] ,**__lowerCAmelCase: int ): '''simple docstring''' _lowerCamelCase : int = 0 _lowerCamelCase : str = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... _lowerCamelCase : Dict = pretrained_model_path while os.path.isdir(__lowerCAmelCase ): _lowerCamelCase : List[Any] = ControlNetModel.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase ) controlnets.append(__lowerCAmelCase ) idx += 1 _lowerCamelCase : Tuple = pretrained_model_path + F"""_{idx}""" logger.info(F"""{len(__lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.""" ) if len(__lowerCAmelCase ) == 0: raise ValueError( F"""No ControlNets found under {os.path.dirname(__lowerCAmelCase )}. Expected at least {pretrained_model_path + '_0'}.""" ) return cls(__lowerCAmelCase )
340
"""simple docstring""" _lowerCAmelCase : Tuple = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Any = [False] * len(_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = [s] _lowerCamelCase : str = True while queue: _lowerCamelCase : Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_lowerCamelCase ) _lowerCamelCase : Any = True _lowerCamelCase : Any = u return visited[t] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : List[str] = [-1] * (len(_lowerCamelCase )) _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : List[str] = [i[:] for i in graph] # Record original cut, copy. while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Any = float("Inf" ) _lowerCamelCase : Dict = sink while s != source: # Find the minimum value in select path _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , graph[parent[s]][s] ) _lowerCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _lowerCamelCase : Optional[Any] = sink while v != source: _lowerCamelCase : Union[str, Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _lowerCamelCase : List[str] = parent[v] for i in range(len(_lowerCamelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
340
1
"""simple docstring""" from arguments import InitializationArguments from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser # Configuration _lowerCAmelCase : Tuple = HfArgumentParser(InitializationArguments) _lowerCAmelCase : Optional[Any] = parser.parse_args() # Load codeparrot tokenizer trained for Python code tokenization _lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name) # Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks _lowerCAmelCase : Union[str, Any] = { '''vocab_size''': len(tokenizer), '''scale_attn_by_inverse_layer_idx''': True, '''reorder_and_upcast_attn''': True, } # Load model config (GPT-2 large in this case) _lowerCAmelCase : Any = AutoConfig.from_pretrained(args.config_name, **config_kwargs) # Initialize new model with config _lowerCAmelCase : Any = AutoModelForCausalLM.from_config(config) # Save model to the hub model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''', '''umberto-commoncrawl-cased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json''' ), '''umberto-wikipedia-uncased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json''' ), } class A_ ( _a ): lowerCAmelCase__ = 'camembert' def __init__( self: Tuple ,__lowerCAmelCase: Union[str, Any]=30_522 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Union[str, Any]=12 ,__lowerCAmelCase: int=12 ,__lowerCAmelCase: Optional[int]=3_072 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: int=512 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Any="absolute" ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: Dict ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[str] = vocab_size _lowerCamelCase : Any = hidden_size _lowerCamelCase : Union[str, Any] = num_hidden_layers _lowerCamelCase : str = num_attention_heads _lowerCamelCase : List[Any] = hidden_act _lowerCamelCase : int = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : str = type_vocab_size _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Union[str, Any] = layer_norm_eps _lowerCamelCase : Tuple = position_embedding_type _lowerCamelCase : List[Any] = use_cache _lowerCamelCase : Dict = classifier_dropout class A_ ( _a ): @property def _lowercase ( self: Any ): '''simple docstring''' if self.task == "multiple-choice": _lowerCamelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"} else: _lowerCamelCase : int = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
340
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Optional[Any] = { '''configuration_blip_2''': [ '''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Blip2Config''', '''Blip2QFormerConfig''', '''Blip2VisionConfig''', ], '''processing_blip_2''': ['''Blip2Processor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : int = [ '''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Blip2Model''', '''Blip2QFormerModel''', '''Blip2PreTrainedModel''', '''Blip2ForConditionalGeneration''', '''Blip2VisionModel''', ] if TYPE_CHECKING: from .configuration_blip_a import ( BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipaConfig, BlipaQFormerConfig, BlipaVisionConfig, ) from .processing_blip_a import BlipaProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip_a import ( BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST, BlipaForConditionalGeneration, BlipaModel, BlipaPreTrainedModel, BlipaQFormerModel, BlipaVisionModel, ) else: import sys _lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
"""simple docstring""" from collections import defaultdict def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : str = True for v in tree[start]: if v not in visited: ret += dfs(_lowerCamelCase ) if ret % 2 == 0: cuts.append(_lowerCamelCase ) return ret def lowerCamelCase_( ) -> int: '''simple docstring''' dfs(1 ) if __name__ == "__main__": _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = 10, 9 _lowerCAmelCase : str = defaultdict(list) _lowerCAmelCase : dict[int, bool] = {} _lowerCAmelCase : list[int] = [] _lowerCAmelCase : Any = 0 _lowerCAmelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
340
1
"""simple docstring""" import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' def wrapper(*_lowerCamelCase , **_lowerCamelCase ): _lowerCamelCase : List[str] = timeit.default_timer() _lowerCamelCase : Optional[int] = func(*_lowerCamelCase , **_lowerCamelCase ) _lowerCamelCase : str = timeit.default_timer() - starttime return delta _lowerCamelCase : List[Any] = func.__name__ return wrapper def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=100 , _lowerCamelCase=None ) -> str: '''simple docstring''' _lowerCamelCase : List[Any] = [] _lowerCamelCase : Any = seq_shapes or {} for i in range(_lowerCamelCase ): _lowerCamelCase : Any = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(_lowerCamelCase , _ArrayXD ): _lowerCamelCase : Optional[int] = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(_lowerCamelCase , datasets.Value ): if v.dtype == "string": _lowerCamelCase : Dict = "The small grey turtle was surprisingly fast when challenged." else: _lowerCamelCase : List[Any] = np.random.randint(10 , size=1 ).astype(v.dtype ).item() elif isinstance(_lowerCamelCase , datasets.Sequence ): while isinstance(_lowerCamelCase , datasets.Sequence ): _lowerCamelCase : Tuple = v.feature _lowerCamelCase : Optional[int] = seq_shapes[k] _lowerCamelCase : List[str] = np.random.rand(*_lowerCamelCase ).astype(v.dtype ) _lowerCamelCase : Union[str, Any] = data dummy_data.append((i, example) ) return dummy_data def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=100 , _lowerCamelCase=None ) -> str: '''simple docstring''' _lowerCamelCase : str = generate_examples(_lowerCamelCase , num_examples=_lowerCamelCase , seq_shapes=_lowerCamelCase ) with ArrowWriter(features=_lowerCamelCase , path=_lowerCamelCase ) as writer: for key, record in dummy_data: _lowerCamelCase : Union[str, Any] = features.encode_example(_lowerCamelCase ) writer.write(_lowerCamelCase ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = writer.finalize() if not num_final_examples == num_examples: raise ValueError( F"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" ) _lowerCamelCase : Union[str, Any] = datasets.Dataset.from_file(filename=_lowerCamelCase , info=datasets.DatasetInfo(features=_lowerCamelCase ) ) return dataset
340
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder _lowerCAmelCase : Optional[int] = '''__DUMMY_TRANSFORMERS_USER__''' _lowerCAmelCase : Dict = '''Dummy User''' _lowerCAmelCase : Optional[int] = '''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''' _lowerCAmelCase : Tuple = '''https://hub-ci.huggingface.co''' _lowerCAmelCase : Any = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}''' _lowerCAmelCase : Tuple = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}''' _lowerCAmelCase : Dict = Path('''~/.huggingface/hub_ci_token''').expanduser() @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' monkeypatch.setattr("datasets.config.HF_ENDPOINT" , _lowerCamelCase ) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' HfFolder.save_token(_lowerCamelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="session" ) def lowerCamelCase_( ) -> str: '''simple docstring''' return HfApi(endpoint=_lowerCamelCase ) @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Dict = HfFolder.get_token() HfFolder.save_token(_lowerCamelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' def _cleanup_repo(_lowerCamelCase ): hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) return _cleanup_repo @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> List[str]: '''simple docstring''' @contextmanager def _temporary_repo(_lowerCamelCase ): try: yield repo_id finally: cleanup_repo(_lowerCamelCase ) return _temporary_repo @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Tuple = F"""repo_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[str] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : Dict = 
F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Any = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[Any] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
340
1
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo _lowerCAmelCase : Optional[int] = '''\ @misc{wu2016googles, title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } ''' _lowerCAmelCase : Dict = '''\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the \'GLEU score\'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score\'s range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. ''' _lowerCAmelCase : Optional[int] = '''\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: \'google_bleu\': google_bleu score Examples: Example 1: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... 
\'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.44 Example 2: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.61 Example 3: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results["google_bleu"], 2)) 0.53 Example 4: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... 
\'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results["google_bleu"], 2)) 0.4 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ ( datasets.Metric ): def _lowercase ( self: Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ) ,id="references" ), } ) ,) def _lowercase ( self: Tuple ,__lowerCAmelCase: List[List[List[str]]] ,__lowerCAmelCase: List[List[str]] ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: int = 4 ,): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__lowerCAmelCase ,hypotheses=__lowerCAmelCase ,min_len=__lowerCAmelCase ,max_len=__lowerCAmelCase ) }
340
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) class A_ ( _a ): def __init__( self: List[Any] ,__lowerCAmelCase: Union[List[ControlNetModel], Tuple[ControlNetModel]] ): '''simple docstring''' super().__init__() _lowerCamelCase : Tuple = nn.ModuleList(__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Union[torch.Tensor, float, int] ,__lowerCAmelCase: torch.Tensor ,__lowerCAmelCase: List[torch.tensor] ,__lowerCAmelCase: List[float] ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[Dict[str, Any]] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(__lowerCAmelCase ,__lowerCAmelCase ,self.nets ) ): _lowerCamelCase, _lowerCamelCase : Union[str, Any] = controlnet( __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,) # merge samples if i == 0: _lowerCamelCase, _lowerCamelCase : Optional[Any] = down_samples, mid_sample else: _lowerCamelCase : Optional[int] = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__lowerCAmelCase ,__lowerCAmelCase ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Union[str, os.PathLike] ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Callable = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[str] = None ,): '''simple docstring''' _lowerCamelCase : List[Any] = 0 _lowerCamelCase : str = save_directory for controlnet in self.nets: controlnet.save_pretrained( __lowerCAmelCase ,is_main_process=__lowerCAmelCase ,save_function=__lowerCAmelCase ,safe_serialization=__lowerCAmelCase ,variant=__lowerCAmelCase ,) idx += 1 _lowerCamelCase : int = model_path_to_save + F"""_{idx}""" @classmethod def _lowercase ( cls: Any ,__lowerCAmelCase: Optional[Union[str, os.PathLike]] ,**__lowerCAmelCase: int ): '''simple docstring''' _lowerCamelCase : int = 0 _lowerCamelCase : str = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... _lowerCamelCase : Dict = pretrained_model_path while os.path.isdir(__lowerCAmelCase ): _lowerCamelCase : List[Any] = ControlNetModel.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase ) controlnets.append(__lowerCAmelCase ) idx += 1 _lowerCamelCase : Tuple = pretrained_model_path + F"""_{idx}""" logger.info(F"""{len(__lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.""" ) if len(__lowerCAmelCase ) == 0: raise ValueError( F"""No ControlNets found under {os.path.dirname(__lowerCAmelCase )}. Expected at least {pretrained_model_path + '_0'}.""" ) return cls(__lowerCAmelCase )
340
1
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase = 100 ) -> int: '''simple docstring''' _lowerCamelCase : Union[str, Any] = n * (n + 1) * (2 * n + 1) / 6 _lowerCamelCase : List[str] = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(f'''{solution() = }''')
340
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : int = logging.get_logger(__name__) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]: '''simple docstring''' _lowerCamelCase : Tuple = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("head" ): _lowerCamelCase : Tuple = "segformer.encoder." + key if key.startswith("backbone" ): _lowerCamelCase : Any = key.replace("backbone" , "segformer.encoder" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _lowerCamelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )] _lowerCamelCase : int = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_lowerCamelCase )-1}""" ) if "norm" in key: _lowerCamelCase : Optional[Any] = key.replace("norm" , "layer_norm" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _lowerCamelCase : Dict = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )] _lowerCamelCase : Tuple = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_lowerCamelCase )-1}""" ) if "layer_norm1" in key: _lowerCamelCase : Union[str, Any] = key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: _lowerCamelCase : int = key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 _lowerCamelCase : Union[str, Any] = key[key.find("block" ) + len("block" )] _lowerCamelCase : Optional[Any] = key.replace(F"""block{idx}""" , F"""block.{int(_lowerCamelCase )-1}""" ) if "attn.q" in key: _lowerCamelCase : Optional[int] = key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: _lowerCamelCase : List[str] = key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: _lowerCamelCase : Tuple = key.replace("attn" , "attention.self" ) if "fc1" in key: _lowerCamelCase : Optional[Any] = key.replace("fc1" , "dense1" ) if "fc2" in key: _lowerCamelCase : Dict = key.replace("fc2" , "dense2" ) if "linear_pred" in key: _lowerCamelCase : int = key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: _lowerCamelCase : str = key.replace("linear_fuse.conv" , "linear_fuse" ) _lowerCamelCase : Optional[Any] = key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _lowerCamelCase : Union[str, Any] = key[key.find("linear_c" ) + len("linear_c" )] _lowerCamelCase : Optional[int] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_lowerCamelCase )-1}""" ) if key.startswith("head" ): _lowerCamelCase : List[str] = key.replace("head" , "classifier" ) _lowerCamelCase : Union[str, Any] = value return new_state_dict def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" ) _lowerCamelCase : Optional[Any] = 
state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict _lowerCamelCase : int = kv_weight[ : config.hidden_sizes[i], : ] _lowerCamelCase : int = kv_bias[: config.hidden_sizes[i]] _lowerCamelCase : Optional[int] = kv_weight[ config.hidden_sizes[i] :, : ] _lowerCamelCase : Optional[Any] = kv_bias[ config.hidden_sizes[i] : ] def lowerCamelCase_( ) -> Dict: '''simple docstring''' _lowerCamelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCamelCase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return image @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : Any = SegformerConfig() _lowerCamelCase : int = False # set attributes based on model_name _lowerCamelCase : Any = "huggingface/label-files" if "segformer" in model_name: _lowerCamelCase : str = model_name[len("segformer." ) : len("segformer." ) + 2] if "ade" in model_name: _lowerCamelCase : str = 150 _lowerCamelCase : Dict = "ade20k-id2label.json" _lowerCamelCase : Dict = (1, 150, 128, 128) elif "city" in model_name: _lowerCamelCase : List[str] = 19 _lowerCamelCase : Tuple = "cityscapes-id2label.json" _lowerCamelCase : Tuple = (1, 19, 128, 128) else: raise ValueError(F"""Model {model_name} not supported""" ) elif "mit" in model_name: _lowerCamelCase : List[str] = True _lowerCamelCase : Tuple = model_name[4:6] _lowerCamelCase : Tuple = 1000 _lowerCamelCase : List[Any] = "imagenet-1k-id2label.json" _lowerCamelCase : List[Any] = (1, 1000) else: raise ValueError(F"""Model {model_name} not supported""" ) # set config attributes _lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowerCamelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase : Optional[Any] = idalabel _lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : int = 256 elif size == "b2": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : List[Any] = 768 _lowerCamelCase : Any = [3, 4, 6, 3] elif size == "b3": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : Union[str, Any] = 768 _lowerCamelCase : Optional[Any] = [3, 4, 18, 3] elif size == "b4": _lowerCamelCase : str = [64, 128, 320, 512] _lowerCamelCase : Optional[Any] = 768 _lowerCamelCase : Dict = [3, 8, 27, 3] elif size == "b5": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : Tuple = 768 _lowerCamelCase : Tuple = [3, 6, 40, 3] else: raise ValueError(F"""Size {size} not supported""" ) # load image processor (only resize + normalize) _lowerCamelCase : Dict = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase ) # prepare image _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values logger.info(F"""Converting model {model_name}...""" ) # load original state dict if encoder_only: _lowerCamelCase : Tuple = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) ) else: _lowerCamelCase : int = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )["state_dict"] # rename keys _lowerCamelCase : str = rename_keys(_lowerCamelCase , 
encoder_only=_lowerCamelCase ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(_lowerCamelCase , _lowerCamelCase ) # create HuggingFace model and load state dict if encoder_only: _lowerCamelCase : Tuple = False _lowerCamelCase : Optional[int] = SegformerForImageClassification(_lowerCamelCase ) else: _lowerCamelCase : List[str] = SegformerForSemanticSegmentation(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) model.eval() # forward pass _lowerCamelCase : Any = model(_lowerCamelCase ) _lowerCamelCase : Dict = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": _lowerCamelCase : str = torch.tensor( [ [[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]], [[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]], [[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]], [[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]], [[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": _lowerCamelCase : int = torch.tensor( [ [[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]], [[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]], [[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": _lowerCamelCase : Optional[Any] = torch.tensor( [ [[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]], [[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]], [[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]], [[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]], [[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]], [[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]], 
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": _lowerCamelCase : Dict = torch.tensor( [ [[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]], [[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]], [[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": _lowerCamelCase : Optional[int] = torch.tensor( [ [[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]], [[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]], [[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [ [-1.13_72e01, -1.27_87e01, -1.34_77e01], [-1.25_36e01, -1.41_94e01, -1.44_09e01], [-1.32_17e01, -1.48_88e01, -1.53_27e01], ], [ [-1.47_91e01, -1.71_22e01, -1.82_77e01], [-1.71_63e01, -1.91_92e01, -1.95_33e01], [-1.78_97e01, -1.99_91e01, -2.03_15e01], ], [ [7.67_23e-01, 4.19_21e-01, -7.78_78e-02], [4.77_72e-01, 9.55_57e-03, -2.80_82e-01], [3.60_32e-01, -2.48_26e-01, -5.11_68e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": _lowerCamelCase : Union[str, Any] = torch.tensor( [ [[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]], [[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]], [[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": _lowerCamelCase : List[Any] = torch.tensor( [ [[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]], [[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]], [[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]], [[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]], [[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": _lowerCamelCase : Any = torch.tensor( [ [[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]], [[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]], [[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]], ] ) 
elif model_name == "segformer.b4.1024x1024.city.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]], [[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]], [[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": _lowerCamelCase : str = torch.tensor( [ [[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]], [[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]], [[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]], ] ) else: _lowerCamelCase : Dict = logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-2 ) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''segformer.b0.512x512.ade.160k''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowerCAmelCase : str = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
340
1
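For reference, the verification step in the conversion script above compares only a 3x3x3 corner of the output logits against hard-coded slices rather than the full tensor. A toy sketch of that pattern follows; the helper name and tensors are hypothetical, not part of the original script.

import torch

def verify_logits(logits: torch.Tensor, expected_slice: torch.Tensor, atol: float = 1e-2) -> None:
    # Only the top-left 3x3x3 corner is checked; a full comparison would be
    # too brittle across hardware and library versions.
    actual = logits[0, :3, :3, :3]
    if not torch.allclose(actual, expected_slice, atol=atol):
        raise ValueError(f"logits mismatch:\n{actual}\nvs\n{expected_slice}")

logits = torch.zeros(1, 4, 8, 8)   # toy (batch, classes, height, width) output
expected = torch.zeros(3, 3, 3)    # reference corner, here all zeros
verify_logits(logits, expected)
print("slice check passed")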
"""simple docstring""" from statistics import mean import numpy as np def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> list: '''simple docstring''' _lowerCamelCase : Union[str, Any] = 0 # Number of processes finished _lowerCamelCase : List[str] = 0 # Displays the finished process. # If it is 0, the performance is completed if it is 1, before the performance. _lowerCamelCase : List[Any] = [0] * no_of_process # List to include calculation results _lowerCamelCase : Dict = [0] * no_of_process # Sort by arrival time. _lowerCamelCase : Union[str, Any] = [burst_time[i] for i in np.argsort(_lowerCamelCase )] _lowerCamelCase : Union[str, Any] = [process_name[i] for i in np.argsort(_lowerCamelCase )] arrival_time.sort() while no_of_process > finished_process_count: _lowerCamelCase : Union[str, Any] = 0 while finished_process[i] == 1: i += 1 if current_time < arrival_time[i]: _lowerCamelCase : Any = arrival_time[i] _lowerCamelCase : Optional[Any] = 0 # Index showing the location of the process being performed _lowerCamelCase : Union[str, Any] = 0 # Saves the current response ratio. _lowerCamelCase : List[str] = 0 for i in range(0 , _lowerCamelCase ): if finished_process[i] == 0 and arrival_time[i] <= current_time: _lowerCamelCase : List[Any] = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[ i ] if response_ratio < temp: _lowerCamelCase : Optional[Any] = temp _lowerCamelCase : Optional[Any] = i # Calculate the turn around time _lowerCamelCase : List[str] = current_time + burst_time[loc] - arrival_time[loc] current_time += burst_time[loc] # Indicates that the process has been performed. _lowerCamelCase : Dict = 1 # Increase finished_process_count by 1 finished_process_count += 1 return turn_around_time def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> list: '''simple docstring''' _lowerCamelCase : List[str] = [0] * no_of_process for i in range(0 , _lowerCamelCase ): _lowerCamelCase : Optional[int] = turn_around_time[i] - burst_time[i] return waiting_time if __name__ == "__main__": _lowerCAmelCase : Any = 5 _lowerCAmelCase : int = ['''A''', '''B''', '''C''', '''D''', '''E'''] _lowerCAmelCase : Union[str, Any] = [1, 2, 3, 4, 5] _lowerCAmelCase : Dict = [1, 2, 3, 4, 5] _lowerCAmelCase : Optional[Any] = calculate_turn_around_time( process_name, arrival_time, burst_time, no_of_process ) _lowerCAmelCase : int = calculate_waiting_time( process_name, turn_around_time, burst_time, no_of_process ) print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''') for i in range(0, no_of_process): print( f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t''' f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}''' ) print(f'''average waiting time : {mean(waiting_time):.5f}''') print(f'''average turn around time : {mean(turn_around_time):.5f}''')
340
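The scheduling file above implements Highest Response Ratio Next (HRRN). Its core selection rule, untangled from the anonymized names (all names below are assumptions), is a one-liner:

# Compact sketch of the HRRN selection rule used above.
def response_ratio(arrival: int, burst: int, now: int) -> float:
    # ratio = (waiting time + burst time) / burst time; it grows while a job
    # waits, so long-waiting jobs eventually beat short new arrivals.
    return (burst + (now - arrival)) / burst

arrivals = [1, 2, 3, 4, 5]
bursts = [1, 2, 3, 4, 5]
now = 5
ready = [i for i, a in enumerate(arrivals) if a <= now]
best = max(ready, key=lambda i: response_ratio(arrivals[i], bursts[i], now))
print(f"process {best} runs next (ratio {response_ratio(arrivals[best], bursts[best], now):.2f})")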
"""simple docstring""" _lowerCAmelCase : dict[tuple[int, int, int], int] = {} def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on _lowerCamelCase : Optional[int] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one _lowerCamelCase : int = _calculate(days - 1 , _lowerCamelCase , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 _lowerCamelCase : Tuple = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter _lowerCamelCase : str = _calculate(days - 1 , _lowerCamelCase , 0 ) _lowerCamelCase : List[Any] = state_late + state_absent + state_ontime _lowerCamelCase : int = prizestrings return prizestrings def lowerCamelCase_( _lowerCamelCase = 30 ) -> int: '''simple docstring''' return _calculate(_lowerCamelCase , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
340
1
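The recursion above is Project Euler 191: count attendance strings with at most one absence in total and never three late days in a row. A de-anonymized sketch using a memo decorator (the readable names are assumptions about the originals) reproduces the published 4-day count of 43:

from functools import lru_cache

@lru_cache(maxsize=None)
def count_prize_strings(days: int, absent: int = 0, late: int = 0) -> int:
    if absent == 2 or late == 3:      # a second absence or a third consecutive late day fails
        return 0
    if days == 0:                     # survived every day: one valid string
        return 1
    return (
        count_prize_strings(days - 1, absent, late + 1)   # late today
        + count_prize_strings(days - 1, absent + 1, 0)    # absent today
        + count_prize_strings(days - 1, absent, 0)        # on time today
    )

print(count_prize_strings(4))   # 43, matching the 4-day example in Project Euler 191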
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _lowerCAmelCase : Optional[int] = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : int = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : int = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Any = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys _lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
"""simple docstring""" from __future__ import annotations def lowerCamelCase_( _lowerCamelCase ) -> bool: '''simple docstring''' _lowerCamelCase : int = str(_lowerCamelCase ) return len(_lowerCamelCase ) == 9 and set(_lowerCamelCase ) == set("123456789" ) def lowerCamelCase_( ) -> int | None: '''simple docstring''' for base_num in range(9999 , 4999 , -1 ): _lowerCamelCase : Union[str, Any] = 100002 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate for base_num in range(333 , 99 , -1 ): _lowerCamelCase : Tuple = 1002003 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate return None if __name__ == "__main__": print(f'''{solution() = }''')
340
1
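The magic multipliers above deserve a note: for a 4-digit n, the concatenated product of n with (1, 2) equals n * 100002, because n * 2 has 5 digits and occupies the low five places of n * 100000 + n * 2; likewise a 3-digit n with (1, 2, 3) gives n * 1002003. A quick check of the 4-digit case:

n = 9327
concatenated = int(str(n * 1) + str(n * 2))   # literal concatenation
shortcut = 100002 * n                          # arithmetic shortcut used above
assert concatenated == shortcut == 932718654
print(concatenated, sorted(str(concatenated)) == list("123456789"))   # 932718654 True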
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping _lowerCAmelCase : Tuple = tuple[int, int] class A_ : def __init__( self: Union[str, Any] ,__lowerCAmelCase: set[int] ,__lowerCAmelCase: Mapping[EdgeT, int] ): '''simple docstring''' _lowerCamelCase : set[int] = vertices _lowerCamelCase : dict[EdgeT, int] = { (min(__lowerCAmelCase ), max(__lowerCAmelCase )): weight for edge, weight in edges.items() } def _lowercase ( self: List[Any] ,__lowerCAmelCase: EdgeT ,__lowerCAmelCase: int ): '''simple docstring''' self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) _lowerCamelCase : Any = weight def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Graph = Graph({min(self.vertices )} ,{} ) _lowerCamelCase : EdgeT _lowerCamelCase : int _lowerCamelCase : EdgeT _lowerCamelCase : int while len(subgraph.vertices ) < len(self.vertices ): _lowerCamelCase : Tuple = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: _lowerCamelCase : Tuple = edge _lowerCamelCase : Optional[int] = weight subgraph.add_edge(__lowerCAmelCase ,__lowerCAmelCase ) return subgraph def lowerCamelCase_( _lowerCamelCase = "p107_network.txt" ) -> int: '''simple docstring''' _lowerCamelCase : str = os.path.abspath(os.path.dirname(_lowerCamelCase ) ) _lowerCamelCase : str = os.path.join(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : dict[EdgeT, int] = {} _lowerCamelCase : list[str] _lowerCamelCase : int _lowerCamelCase : int with open(_lowerCamelCase ) as f: _lowerCamelCase : List[Any] = f.read().strip().split("\n" ) _lowerCamelCase : Dict = [line.split("," ) for line in data] for edgea in range(1 , len(_lowerCamelCase ) ): for edgea in range(_lowerCamelCase ): if adjaceny_matrix[edgea][edgea] != "-": _lowerCamelCase : Tuple = int(adjaceny_matrix[edgea][edgea] ) _lowerCamelCase : Graph = Graph(set(range(len(_lowerCamelCase ) ) ) , _lowerCamelCase ) _lowerCamelCase : Graph = graph.prims_algorithm() _lowerCamelCase : int = sum(graph.edges.values() ) _lowerCamelCase : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f'''{solution() = }''')
340
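The prims_algorithm method above rescans every edge on each step, which is O(VE). A heap-based variant of the same greedy rule (grow the tree along the cheapest edge crossing the cut) runs in O(E log V); the adjacency-dict input format below is an assumption for the sketch.

from __future__ import annotations
import heapq

def prim_mst_weight(adj: dict[int, list[tuple[int, int]]], start: int = 0) -> int:
    visited = {start}
    # heap of (weight, vertex) for edges leaving the current tree
    heap = [(w, v) for v, w in adj[start]]
    heapq.heapify(heap)
    total = 0
    while heap and len(visited) < len(adj):
        w, v = heapq.heappop(heap)
        if v in visited:
            continue                  # stale edge: both endpoints already in the tree
        visited.add(v)
        total += w
        for u, wu in adj[v]:
            if u not in visited:
                heapq.heappush(heap, (wu, u))
    return total

triangle = {0: [(1, 1), (2, 3)], 1: [(0, 1), (2, 2)], 2: [(0, 3), (1, 2)]}
print(prim_mst_weight(triangle))   # 3: edges (0, 1) and (1, 2)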
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class A_ ( _a ): lowerCAmelCase__ = 'char' lowerCAmelCase__ = 'bpe' lowerCAmelCase__ = 'wp' _lowerCAmelCase : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class A_ ( _a ): lowerCAmelCase__ = ['image_processor', 'char_tokenizer'] lowerCAmelCase__ = 'ViTImageProcessor' lowerCAmelCase__ = 'MgpstrTokenizer' def __init__( self: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Optional[int]=None ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Any = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." ,__lowerCAmelCase ,) _lowerCamelCase : List[Any] = kwargs.pop("feature_extractor" ) _lowerCamelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) _lowerCamelCase : List[str] = tokenizer _lowerCamelCase : str = AutoTokenizer.from_pretrained("gpt2" ) _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(__lowerCAmelCase ,__lowerCAmelCase ) def __call__( self: Optional[int] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[Any]=None ,**__lowerCAmelCase: Tuple ): '''simple docstring''' if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." 
) if images is not None: _lowerCamelCase : Optional[int] = self.image_processor(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is not None: _lowerCamelCase : int = self.char_tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is None: return inputs elif images is None: return encodings else: _lowerCamelCase : Tuple = encodings["input_ids"] return inputs def _lowercase ( self: int ,__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = sequences _lowerCamelCase : Dict = char_preds.size(0 ) _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._decode_helper(__lowerCAmelCase ,"char" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = self._decode_helper(__lowerCAmelCase ,"bpe" ) _lowerCamelCase, _lowerCamelCase : Tuple = self._decode_helper(__lowerCAmelCase ,"wp" ) _lowerCamelCase : List[str] = [] _lowerCamelCase : str = [] for i in range(__lowerCAmelCase ): _lowerCamelCase : str = [char_scores[i], bpe_scores[i], wp_scores[i]] _lowerCamelCase : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] _lowerCamelCase : Optional[Any] = scores.index(max(__lowerCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _lowerCamelCase : Tuple = {} _lowerCamelCase : Tuple = final_strs _lowerCamelCase : int = final_scores _lowerCamelCase : str = char_strs _lowerCamelCase : Dict = bpe_strs _lowerCamelCase : int = wp_strs return out def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ): '''simple docstring''' if format == DecodeType.CHARACTER: _lowerCamelCase : int = self.char_decode _lowerCamelCase : List[str] = 1 _lowerCamelCase : Optional[int] = "[s]" elif format == DecodeType.BPE: _lowerCamelCase : Dict = self.bpe_decode _lowerCamelCase : str = 2 _lowerCamelCase : Union[str, Any] = "#" elif format == DecodeType.WORDPIECE: _lowerCamelCase : int = self.wp_decode _lowerCamelCase : List[str] = 102 _lowerCamelCase : List[Any] = "[SEP]" else: raise ValueError(F"""Format {format} is not supported.""" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = [], [] _lowerCamelCase : Any = pred_logits.size(0 ) _lowerCamelCase : int = pred_logits.size(1 ) _lowerCamelCase, _lowerCamelCase : List[Any] = pred_logits.topk(1 ,dim=-1 ,largest=__lowerCAmelCase ,sorted=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_index.view(-1 ,__lowerCAmelCase )[:, 1:] _lowerCamelCase : List[str] = decoder(__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : str = torch.nn.functional.softmax(__lowerCAmelCase ,dim=2 ).max(dim=2 ) _lowerCamelCase : Any = preds_max_prob[:, 1:] for index in range(__lowerCAmelCase ): _lowerCamelCase : List[Any] = preds_str[index].find(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_str[index][:pred_eos] _lowerCamelCase : Optional[Any] = preds_index[index].cpu().tolist() _lowerCamelCase : List[str] = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1 _lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1] _lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__lowerCAmelCase ) conf_scores.append(__lowerCAmelCase ) return dec_strs, conf_scores def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : str = [seq.replace(" " ,"" ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs def _lowercase ( 
self: List[str] ,__lowerCAmelCase: List[str] ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(__lowerCAmelCase ) def _lowercase ( self: Tuple ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = [seq.replace(" " ,"" ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs
340
1
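The batch_decode logic above arbitrates between three decoding heads (character, BPE, wordpiece), keeping whichever one reports the highest sequence confidence for each sample. Stripped to plain Python with made-up strings and scores, the rule is:

char_out = [("hello", 0.91), ("wrld", 0.40)]
bpe_out = [("helo", 0.75), ("world", 0.88)]
wp_out = [("hella", 0.60), ("word", 0.85)]

final = []
for candidates in zip(char_out, bpe_out, wp_out):
    # per sample, keep the (text, score) pair with the best confidence
    text, score = max(candidates, key=lambda c: c[1])
    final.append((text, score))

print(final)   # [('hello', 0.91), ('world', 0.88)]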
"""simple docstring""" import colorsys from PIL import Image # type: ignore def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _lowerCamelCase : List[Any] = x _lowerCamelCase : int = y for step in range(_lowerCamelCase ): # noqa: B007 _lowerCamelCase : Tuple = a * a - b * b + x _lowerCamelCase : Any = 2 * a * b + y _lowerCamelCase : List[str] = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def lowerCamelCase_( _lowerCamelCase ) -> tuple: '''simple docstring''' if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def lowerCamelCase_( _lowerCamelCase ) -> tuple: '''simple docstring''' if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(_lowerCamelCase , 1 , 1 ) ) def lowerCamelCase_( _lowerCamelCase = 800 , _lowerCamelCase = 600 , _lowerCamelCase = -0.6 , _lowerCamelCase = 0 , _lowerCamelCase = 3.2 , _lowerCamelCase = 50 , _lowerCamelCase = True , ) -> Image.Image: '''simple docstring''' _lowerCamelCase : Optional[Any] = Image.new("RGB" , (image_width, image_height) ) _lowerCamelCase : Union[str, Any] = img.load() # loop through the image-coordinates for image_x in range(_lowerCamelCase ): for image_y in range(_lowerCamelCase ): # determine the figure-coordinates based on the image-coordinates _lowerCamelCase : Optional[Any] = figure_width / image_width * image_height _lowerCamelCase : Any = figure_center_x + (image_x / image_width - 0.5) * figure_width _lowerCamelCase : Tuple = figure_center_y + (image_y / image_height - 0.5) * figure_height _lowerCamelCase : Optional[Any] = get_distance(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: _lowerCamelCase : Optional[int] = get_color_coded_rgb(_lowerCamelCase ) else: _lowerCamelCase : Union[str, Any] = get_black_and_white_rgb(_lowerCamelCase ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure _lowerCAmelCase : Dict = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
340
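The same escape-time iteration as the get_distance function above can drive a quick ASCII preview instead of a PIL image; this is a sketch only, with frame bounds chosen to roughly match the script's defaults.

def escape_fraction(cx: float, cy: float, max_step: int = 30) -> float:
    # iterate z -> z^2 + c and record how quickly |z| exceeds 2
    a, b = cx, cy
    for step in range(max_step):
        a, b = a * a - b * b + cx, 2 * a * b + cy
        if a * a + b * b > 4:        # |z|^2 > 4 guarantees divergence
            break
    return step / (max_step - 1)

for row in range(11):
    y = (row / 10 - 0.5) * 2.4
    line = ""
    for col in range(40):
        x = -0.6 + (col / 40 - 0.5) * 3.2
        line += "#" if escape_fraction(x, y) == 1.0 else "."
    print(line)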
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version _lowerCAmelCase : List[Any] = get_logger(__name__) class A_ : lowerCAmelCase__ = 'dummy_data' lowerCAmelCase__ = 'datasets' lowerCAmelCase__ = False def __init__( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[Version, str] ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[List[Callable]] = None ,): '''simple docstring''' _lowerCamelCase : str = 0 _lowerCamelCase : List[str] = dataset_name _lowerCamelCase : Optional[int] = cache_dir _lowerCamelCase : Optional[int] = use_local_dummy_data _lowerCamelCase : int = config # download_callbacks take a single url as input _lowerCamelCase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _lowerCamelCase : int = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _lowerCamelCase : Tuple = str(__lowerCAmelCase ) # to be downloaded _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Dict = None @property def _lowercase ( self: str ): '''simple docstring''' if self._dummy_file is None: _lowerCamelCase : List[str] = self.download_dummy_data() return self._dummy_file @property def _lowercase ( self: str ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join("dummy" ,self.version_name ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' return os.path.join(self.dummy_data_folder ,"dummy_data.zip" ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Dict = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _lowerCamelCase : Optional[int] = cached_path( __lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=__lowerCAmelCase ,force_extract=__lowerCAmelCase ) return os.path.join(__lowerCAmelCase ,self.dummy_file_name ) @property def _lowercase ( self: Tuple ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def _lowercase ( self: List[str] ): '''simple docstring''' if self._bucket_url is None: _lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) ) return self._bucket_url @property def _lowercase ( self: Union[str, Any] ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _lowerCamelCase : Tuple = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _lowerCamelCase : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase ) elif isinstance(__lowerCAmelCase ,(list, tuple) ): return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase ) else: return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' return path def _lowercase ( self: Optional[int] ): '''simple docstring''' return {} def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : str = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): for single_url in single_urls: download_callback(__lowerCAmelCase ) else: _lowerCamelCase : Union[str, Any] = single_urls download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) for x in single_urls] else: _lowerCamelCase : Union[str, Any] = single_urls _lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) _lowerCamelCase : List[Any] = value # make sure that values are unique if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Dict = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _lowerCamelCase : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,__lowerCAmelCase ) ) for url in data_url ) _lowerCamelCase : Optional[Any] = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _lowerCamelCase : Tuple = [data_url[0]] * len(__lowerCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the 
url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__lowerCAmelCase ) return dummy_data_list def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__lowerCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _lowercase ( self: Optional[Any] ): '''simple docstring''' pass def _lowercase ( self: Optional[int] ): '''simple docstring''' pass def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' def _iter_archive_members(__lowerCAmelCase: Any ): # this preserves the order of the members inside the ZIP archive _lowerCamelCase : Tuple = Path(self.dummy_file ).parent _lowerCamelCase : str = path.relative_to(__lowerCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _lowerCamelCase : Optional[int] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = Path(__lowerCAmelCase ) _lowerCamelCase : int = _iter_archive_members(__lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__lowerCAmelCase ).as_posix(), file_path.open("rb" ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : List[Any] = [paths] for path in paths: if os.path.isfile(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__lowerCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
340
1
"""simple docstring""" import random import sys import numpy as np from matplotlib import pyplot as plt from matplotlib.colors import ListedColormap _lowerCAmelCase : Union[str, Any] = '''Usage of script: script_name <size_of_canvas:int>''' _lowerCAmelCase : List[str] = [0] * 100 + [1] * 10 random.shuffle(choice) def lowerCamelCase_( _lowerCamelCase ) -> list[list[bool]]: '''simple docstring''' _lowerCamelCase : str = [[False for i in range(_lowerCamelCase )] for j in range(_lowerCamelCase )] return canvas def lowerCamelCase_( _lowerCamelCase ) -> None: '''simple docstring''' for i, row in enumerate(_lowerCamelCase ): for j, _ in enumerate(_lowerCamelCase ): _lowerCamelCase : Optional[int] = bool(random.getrandbits(1 ) ) def lowerCamelCase_( _lowerCamelCase ) -> list[list[bool]]: '''simple docstring''' _lowerCamelCase : Union[str, Any] = np.array(_lowerCamelCase ) _lowerCamelCase : Any = np.array(create_canvas(current_canvas.shape[0] ) ) for r, row in enumerate(_lowerCamelCase ): for c, pt in enumerate(_lowerCamelCase ): _lowerCamelCase : int = __judge_point( _lowerCamelCase , current_canvas[r - 1 : r + 2, c - 1 : c + 2] ) _lowerCamelCase : Optional[int] = next_gen_canvas del next_gen_canvas # cleaning memory as we move on. _lowerCamelCase : list[list[bool]] = current_canvas.tolist() return return_canvas def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> bool: '''simple docstring''' _lowerCamelCase : Dict = 0 _lowerCamelCase : Optional[int] = 0 # finding dead or alive neighbours count. for i in neighbours: for status in i: if status: alive += 1 else: dead += 1 # handling duplicate entry for focus pt. if pt: alive -= 1 else: dead -= 1 # running the rules of game here. _lowerCamelCase : Tuple = pt if pt: if alive < 2: _lowerCamelCase : List[str] = False elif alive == 2 or alive == 3: _lowerCamelCase : Tuple = True elif alive > 3: _lowerCamelCase : List[str] = False else: if alive == 3: _lowerCamelCase : Dict = True return state if __name__ == "__main__": if len(sys.argv) != 2: raise Exception(usage_doc) _lowerCAmelCase : Dict = int(sys.argv[1]) # main working structure of this module. _lowerCAmelCase : List[str] = create_canvas(canvas_size) seed(c) _lowerCAmelCase , _lowerCAmelCase : List[Any] = plt.subplots() fig.show() _lowerCAmelCase : Optional[int] = ListedColormap(['''w''', '''k''']) try: while True: _lowerCAmelCase : str = run(c) ax.matshow(c, cmap=cmap) fig.canvas.draw() ax.cla() except KeyboardInterrupt: # do nothing. pass
340
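The __judge_point logic above encodes Conway's standard rules: a live cell survives with 2 or 3 live neighbours, a dead cell is born with exactly 3, and everything else dies or stays dead. A compact restatement plus a blinker sanity check (the helper names are assumptions):

from __future__ import annotations

def next_state(alive_now: bool, live_neighbours: int) -> bool:
    if alive_now:
        return live_neighbours in (2, 3)
    return live_neighbours == 3

def step(grid: list[list[bool]]) -> list[list[bool]]:
    n = len(grid)
    def neighbours(r: int, c: int) -> int:
        # count live cells in the 3x3 block around (r, c), excluding the cell itself
        return sum(
            grid[i][j]
            for i in range(max(r - 1, 0), min(r + 2, n))
            for j in range(max(c - 1, 0), min(c + 2, n))
            if (i, j) != (r, c)
        )
    return [[next_state(grid[r][c], neighbours(r, c)) for c in range(n)] for r in range(n)]

blinker = [[False] * 5 for _ in range(5)]
for c in (1, 2, 3):
    blinker[2][c] = True               # horizontal bar in row 2
after = step(blinker)
print([[int(v) for v in row] for row in after])   # vertical bar in column 2, rows 1-3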
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise TypeError("Undefined for non-integers" ) elif precision < 1: raise ValueError("Undefined for non-natural numbers" ) _lowerCamelCase : int = precision _lowerCamelCase : Dict = ceil(precision / 14 ) _lowerCamelCase : Optional[Any] = 426880 * Decimal(10005 ).sqrt() _lowerCamelCase : int = 1 _lowerCamelCase : Optional[int] = 13591409 _lowerCamelCase : int = Decimal(_lowerCamelCase ) for k in range(1 , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowerCamelCase ) ** 3) linear_term += 545140134 exponential_term *= -262537412640768000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = 50 print(f'''The first {n} digits of pi is: {pi(n)}''')
340
1
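For reference, the series the routine above accumulates is the Chudnovsky formula; each term contributes roughly 14 correct digits, which is why the iteration count is ceil(precision / 14):

\[
  \frac{1}{\pi}
  = 12 \sum_{k=0}^{\infty}
    \frac{(-1)^k\,(6k)!\,(13591409 + 545140134\,k)}
         {(3k)!\,(k!)^3\,640320^{3k + 3/2}} .
\]

In the code, the constant factor \(426880\sqrt{10005}\) equals \(640320^{3/2}/12\), and each update of the exponential term multiplies by \(-262537412640768000 = -640320^3\).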
"""simple docstring""" from .imports import is_rich_available if is_rich_available(): from rich.traceback import install install(show_locals=False) else: raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
340
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class A_ ( _a ): lowerCAmelCase__ = 42 lowerCAmelCase__ = None def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=0.9_9_9 , _lowerCamelCase="cosine" , ) -> List[str]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(_lowerCamelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_lowerCamelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) _lowerCamelCase : str = [] for i in range(_lowerCamelCase ): _lowerCamelCase : Any = i / num_diffusion_timesteps _lowerCamelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) ) return torch.tensor(_lowerCamelCase , dtype=torch.floataa ) class A_ ( _a , _a ): @register_to_config def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,): '''simple docstring''' if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) _lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = 1.0 - self.betas _lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 ) _lowerCamelCase : int = torch.tensor(1.0 ) # standard deviation of the initial noise distribution _lowerCamelCase : Tuple = 1.0 # setable values _lowerCamelCase : List[Any] = None _lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() ) _lowerCamelCase : List[str] = variance_type def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ): '''simple docstring''' return sample def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ): '''simple docstring''' _lowerCamelCase : str = num_inference_steps _lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) _lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) _lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ): '''simple docstring''' if prev_timestep is None: _lowerCamelCase : List[str] = t - 1 _lowerCamelCase : Optional[int] = self.alphas_cumprod[t] _lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : List[Any] = self.betas[t] else: _lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from 
https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample _lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: _lowerCamelCase : List[str] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": _lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) ) _lowerCamelCase : str = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler _lowerCamelCase : str = variance.log() _lowerCamelCase : str = beta.log() _lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2 _lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,): '''simple docstring''' _lowerCamelCase : str = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": _lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 ) else: _lowerCamelCase : List[Any] = None # 1. compute alphas, betas if prev_timestep is None: _lowerCamelCase : List[Any] = t - 1 _lowerCamelCase : Dict = self.alphas_cumprod[t] _lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : List[str] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : Any = self.betas[t] _lowerCamelCase : str = self.alphas[t] else: _lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev _lowerCamelCase : Optional[Any] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": _lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": _lowerCamelCase : List[Any] = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: _lowerCamelCase : Any = torch.clamp( __lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t _lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _lowerCamelCase : Union[str, Any] = 0 if t > 0: _lowerCamelCase : Dict = randn_tensor( model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device ) _lowerCamelCase : Any = self._get_variance( __lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,) if self.variance_type == "fixed_small_log": _lowerCamelCase : Optional[Any] = variance elif self.variance_type == "learned_range": _lowerCamelCase : Optional[int] = (0.5 * variance).exp() else: raise ValueError( F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" " for the UnCLIPScheduler." ) _lowerCamelCase : Dict = variance * variance_noise _lowerCamelCase : List[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.IntTensor ,): '''simple docstring''' _lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype ) _lowerCamelCase : Any = timesteps.to(original_samples.device ) _lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5 _lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 _lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
340
1
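The two coefficients computed in the step() method above are exactly the posterior-mean weights of DDPM (Ho et al. 2020, eq. 7, https://arxiv.org/abs/2006.11239), and _get_variance returns the matching posterior variance. With \(\bar\alpha_t\) the cumulative product of the \(\alpha\)'s:

\[
  \mu_t(x_t, x_0)
  = \frac{\sqrt{\bar\alpha_{t-1}}\,\beta_t}{1-\bar\alpha_t}\, x_0
  + \frac{\sqrt{\alpha_t}\,\bigl(1-\bar\alpha_{t-1}\bigr)}{1-\bar\alpha_t}\, x_t,
  \qquad
  \tilde\beta_t = \frac{1-\bar\alpha_{t-1}}{1-\bar\alpha_t}\,\beta_t .
\]

pred_original_sample_coeff and current_sample_coeff in the code are these two factors, applied to the predicted \(x_0\) and the current sample respectively.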
"""simple docstring""" import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py _lowerCAmelCase : Optional[int] = '''.''' if __name__ == "__main__": _lowerCAmelCase : Tuple = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''') _lowerCAmelCase : List[Any] = [] _lowerCAmelCase : Optional[int] = [] with open(doctest_file_path) as fp: for line in fp: _lowerCAmelCase : str = line.strip() _lowerCAmelCase : int = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: _lowerCAmelCase : Any = '''\n'''.join(non_existent_paths) raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''') if all_paths != sorted(all_paths): raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
340
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Dict = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : str = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' for attribute in key.split("." ): _lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: _lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: _lowerCamelCase : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowerCamelCase : Tuple = value elif weight_type == "weight_g": _lowerCamelCase : List[str] = value elif weight_type == "weight_v": _lowerCamelCase : List[Any] = value elif weight_type == "bias": _lowerCamelCase : str = value elif weight_type == "running_mean": _lowerCamelCase : Optional[int] = value elif weight_type == "running_var": _lowerCamelCase : Optional[Any] = value elif weight_type == "num_batches_tracked": _lowerCamelCase : int = value elif weight_type == "inv_freq": _lowerCamelCase : List[str] = value else: _lowerCamelCase : Optional[Any] = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Dict = [] _lowerCamelCase : Optional[Any] = fairseq_model.state_dict() _lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): _lowerCamelCase : Dict = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , ) _lowerCamelCase : List[Any] = True else: for key, mapped_key in MAPPING.items(): _lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _lowerCamelCase : int = True if "*" in mapped_key: _lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2] _lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase ) if "pos_bias_u" in name: _lowerCamelCase : int = None elif "pos_bias_v" in name: _lowerCamelCase : Any = None elif "weight_g" in name: _lowerCamelCase : Any = "weight_g" elif "weight_v" in name: _lowerCamelCase : Any = "weight_v" elif "bias" in name: _lowerCamelCase : Optional[Any] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj _lowerCamelCase : Dict = "weight" elif "running_mean" in name: _lowerCamelCase : str = "running_mean" elif "inv_freq" in name: _lowerCamelCase : List[Any] = "inv_freq" elif "running_var" in name: _lowerCamelCase : Tuple = "running_var" elif "num_batches_tracked" in name: _lowerCamelCase : str = "num_batches_tracked" else: _lowerCamelCase : Dict = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : int = full_name.split("conv_layers." )[-1] _lowerCamelCase : List[Any] = name.split("." 
) _lowerCamelCase : Union[str, Any] = int(items[0] ) _lowerCamelCase : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowerCamelCase : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowerCamelCase : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) _lowerCamelCase : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowerCamelCase : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCamelCase ) @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict: '''simple docstring''' if config_path is not None: _lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" ) else: _lowerCamelCase : Dict = WavaVecaConformerConfig() if "rope" in checkpoint_path: _lowerCamelCase : List[Any] = "rotary" if is_finetuned: if dict_path: _lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowerCamelCase : Optional[int] = target_dict.pad_index _lowerCamelCase : Dict = target_dict.bos_index _lowerCamelCase : Optional[Any] = target_dict.eos_index _lowerCamelCase : str = len(target_dict.symbols ) _lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" ) if not os.path.isdir(_lowerCamelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) _lowerCamelCase : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched _lowerCamelCase : List[str] = 0 _lowerCamelCase : List[Any] = 1 with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , ) _lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False _lowerCamelCase : 
Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) _lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) _lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase ) else: _lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase ) if is_finetuned: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" ) _lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase ) _lowerCamelCase : Dict = model[0].eval() recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned ) hf_wavavec.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) _lowerCAmelCase : str = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
340
1
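The "*" substitution in the weight-loading loop above maps fairseq parameter names onto layer-indexed HuggingFace names. Isolated into a toy helper (the mapping entry and parameter name below are illustrative only, not the full table):

from typing import Optional

MAPPING = {"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense"}

def rename(fairseq_name: str) -> Optional[str]:
    for key, mapped in MAPPING.items():
        if key in fairseq_name:
            if "*" in mapped:
                # e.g. "encoder.layers.3.ffn1.w_1.weight" -> layer index "3"
                layer_index = fairseq_name.split(key)[0].split(".")[-2]
                mapped = mapped.replace("*", layer_index)
            return mapped
    return None

print(rename("encoder.layers.3.ffn1.w_1.weight"))
# encoder.layers.3.ffn1.intermediate_dense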
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' if not head: return True # split the list to two parts _lowerCamelCase, _lowerCamelCase : str = head.next, head while fast and fast.next: _lowerCamelCase : Union[str, Any] = fast.next.next _lowerCamelCase : Optional[int] = slow.next _lowerCamelCase : List[Any] = slow.next _lowerCamelCase : Any = None # Don't forget here! But forget still works! # reverse the second part _lowerCamelCase : Dict = None while second: _lowerCamelCase : Optional[Any] = second.next _lowerCamelCase : Optional[int] = node _lowerCamelCase : Optional[Any] = second _lowerCamelCase : List[Any] = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False _lowerCamelCase : int = node.next _lowerCamelCase : Union[str, Any] = head.next return True def lowerCamelCase_( _lowerCamelCase ) -> Any: '''simple docstring''' if not head or not head.next: return True # 1. Get the midpoint (slow) _lowerCamelCase : Optional[Any] = head while fast and fast.next: _lowerCamelCase, _lowerCamelCase : List[str] = fast.next.next, slow.next # 2. Push the second half into the stack _lowerCamelCase : int = [slow.val] while slow.next: _lowerCamelCase : List[str] = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False _lowerCamelCase : List[Any] = cur.next return True def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' if not head or not head.next: return True _lowerCamelCase : int = {} _lowerCamelCase : Any = 0 while head: if head.val in d: d[head.val].append(_lowerCamelCase ) else: _lowerCamelCase : Optional[int] = [pos] _lowerCamelCase : Union[str, Any] = head.next pos += 1 _lowerCamelCase : Tuple = pos - 1 _lowerCamelCase : Union[str, Any] = 0 for v in d.values(): if len(_lowerCamelCase ) % 2 != 0: middle += 1 else: _lowerCamelCase : int = 0 for i in range(0 , len(_lowerCamelCase ) ): if v[i] + v[len(_lowerCamelCase ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
340
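The three checks above assume a ListNode type that the dump omits. A minimal stand-in plus a usage run of the stack variant follows; the readable names are assumptions about the anonymized originals.

class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None

def build(values):
    head = None
    for v in reversed(values):
        node = ListNode(v)
        node.next = head
        head = node
    return head

def is_palindrome_stack(head):
    # find the midpoint with a fast/slow runner, stack the second half,
    # then pop while rescanning from the head
    slow = fast = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    stack = []
    while slow:
        stack.append(slow.val)
        slow = slow.next
    node = head
    while stack:
        if stack.pop() != node.val:
            return False
        node = node.next
    return True

print(is_palindrome_stack(build([1, 2, 3, 2, 1])))   # True
print(is_palindrome_stack(build([1, 2, 2])))          # False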
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(a - b ) for a, b in zip(_lowerCamelCase , _lowerCamelCase ) ) ) def lowerCamelCase_( _lowerCamelCase ) -> None: '''simple docstring''' if point: if isinstance(_lowerCamelCase , _lowerCamelCase ): for item in point: if not isinstance(_lowerCamelCase , (int, float) ): _lowerCamelCase : Dict = ( "Expected a list of numbers as input, found " F"""{type(_lowerCamelCase ).__name__}""" ) raise TypeError(_lowerCamelCase ) else: _lowerCamelCase : Optional[int] = F"""Expected a list of numbers as input, found {type(_lowerCamelCase ).__name__}""" raise TypeError(_lowerCamelCase ) else: raise ValueError("Missing an input" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(x - y ) for x, y in zip(_lowerCamelCase , _lowerCamelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
340
1
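The palindrome-list row above is hard to follow after the dataset's variable renaming (assignment targets such as `fast`, `cur`, and `stack` lose their definitions). A minimal sketch of the same fast/slow-pointer technique with readable names restored; `ListNode` is an illustrative helper, not part of the row:

class ListNode:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt


def is_palindrome(head):
    # Advance `fast` two nodes per step and `slow` one, so `slow` stops mid-list.
    slow = fast = head
    while fast and fast.next:
        slow, fast = slow.next, fast.next.next
    # Reverse the second half in place; `prev` ends up as its new head.
    prev = None
    while slow:
        slow.next, prev, slow = prev, slow, slow.next
    # Walk both halves; the reversed half is never longer than the first.
    left, right = head, prev
    while right:
        if left.val != right.val:
            return False
        left, right = left.next, right.next
    return True


nodes = None
for value in [1, 2, 3, 2, 1]:
    nodes = ListNode(value, nodes)
print(is_palindrome(nodes))  # True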
"""simple docstring""" from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class A_ : lowerCAmelCase__ = LEDConfig lowerCAmelCase__ = {} lowerCAmelCase__ = 'gelu' def __init__( self: Union[str, Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Union[str, Any]=13 ,__lowerCAmelCase: Union[str, Any]=7 ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Union[str, Any]=False ,__lowerCAmelCase: List[Any]=99 ,__lowerCAmelCase: Optional[Any]=32 ,__lowerCAmelCase: str=2 ,__lowerCAmelCase: Tuple=4 ,__lowerCAmelCase: List[Any]=37 ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Dict=0.1 ,__lowerCAmelCase: Tuple=20 ,__lowerCAmelCase: Optional[Any]=2 ,__lowerCAmelCase: Optional[int]=1 ,__lowerCAmelCase: List[str]=0 ,__lowerCAmelCase: Dict=4 ,): '''simple docstring''' _lowerCamelCase : Union[str, Any] = parent _lowerCamelCase : List[Any] = batch_size _lowerCamelCase : Tuple = seq_length _lowerCamelCase : Optional[int] = is_training _lowerCamelCase : Dict = use_labels _lowerCamelCase : Optional[Any] = vocab_size _lowerCamelCase : Tuple = hidden_size _lowerCamelCase : Dict = num_hidden_layers _lowerCamelCase : Tuple = num_attention_heads _lowerCamelCase : Any = intermediate_size _lowerCamelCase : Tuple = hidden_dropout_prob _lowerCamelCase : Optional[int] = attention_probs_dropout_prob _lowerCamelCase : int = max_position_embeddings _lowerCamelCase : Union[str, Any] = eos_token_id _lowerCamelCase : Optional[Any] = pad_token_id _lowerCamelCase : Optional[int] = bos_token_id _lowerCamelCase : Any = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after _lowerCamelCase : str = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests _lowerCamelCase : int = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) _lowerCamelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 ) _lowerCamelCase : Dict = tf.concat([input_ids, eos_tensor] ,axis=1 ) _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) _lowerCamelCase : Optional[Any] = self.config_cls( vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,attention_window=self.attention_window ,**self.config_updates ,) _lowerCamelCase : List[Any] = prepare_led_inputs_dict(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = tf.concat( [tf.zeros_like(__lowerCAmelCase )[:, :-1], tf.ones_like(__lowerCAmelCase )[:, -1:]] ,axis=-1 ,) _lowerCamelCase : int = global_attention_mask return config, inputs_dict def _lowercase ( self: Dict ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: List[str] ): '''simple docstring''' _lowerCamelCase : str = TFLEDModel(config=__lowerCAmelCase ).get_decoder() _lowerCamelCase : Optional[int] = inputs_dict["input_ids"] _lowerCamelCase : List[Any] = input_ids[:1, :] _lowerCamelCase : Tuple = inputs_dict["attention_mask"][:1, :] _lowerCamelCase : Optional[int] = 1 # first forward pass _lowerCamelCase : Any = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,use_cache=__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : int = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _lowerCamelCase : Tuple = ids_tensor((self.batch_size, 3) ,config.vocab_size ) _lowerCamelCase : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta ) # append to next input_ids and _lowerCamelCase : int = tf.concat([input_ids, next_tokens] ,axis=-1 ) _lowerCamelCase : Dict = tf.concat([attention_mask, next_attn_mask] ,axis=-1 ) _lowerCamelCase : Any = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase )[0] _lowerCamelCase : Tuple = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,past_key_values=__lowerCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] ) # select random slice _lowerCamelCase : Tuple = int(ids_tensor((1,) ,output_from_past.shape[-1] ) ) _lowerCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx] _lowerCamelCase : Any = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__lowerCAmelCase ,__lowerCAmelCase ,rtol=1e-3 ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: _lowerCamelCase : 
List[str] = tf.cast(tf.math.not_equal(_lowerCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _lowerCamelCase : str = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _lowerCamelCase : Union[str, Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _lowerCamelCase : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class A_ ( _a , _a , unittest.TestCase ): lowerCAmelCase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () lowerCAmelCase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else () lowerCAmelCase__ = ( { 'conversational': TFLEDForConditionalGeneration, 'feature-extraction': TFLEDModel, 'summarization': TFLEDForConditionalGeneration, 'text2text-generation': TFLEDForConditionalGeneration, 'translation': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Tuple = TFLEDModelTester(self ) _lowerCamelCase : str = ConfigTester(self ,config_class=__lowerCAmelCase ) def _lowercase ( self: Dict ): '''simple docstring''' self.config_tester.run_common_tests() def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Optional[Any] = tf.zeros_like(inputs_dict["attention_mask"] ) _lowerCamelCase : List[str] = 2 _lowerCamelCase : Optional[Any] = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices ,1 ,inputs_dict["global_attention_mask"] ,) _lowerCamelCase : Tuple = True _lowerCamelCase : List[str] = self.model_tester.seq_length _lowerCamelCase : Dict = self.model_tester.encoder_seq_length def check_decoder_attentions_output(__lowerCAmelCase: Tuple ): _lowerCamelCase : Union[str, Any] = outputs.decoder_attentions self.assertEqual(len(__lowerCAmelCase ) ,self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_length, seq_length] ,) def check_encoder_attentions_output(__lowerCAmelCase: Union[str, Any] ): _lowerCamelCase : List[str] = [t.numpy() for t in outputs.encoder_attentions] _lowerCamelCase : Optional[Any] = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(__lowerCAmelCase ) ,self.model_tester.num_hidden_layers ) self.assertEqual(len(__lowerCAmelCase ) ,self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_length, seq_length] ,) self.assertListEqual( list(global_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] ,) for model_class in self.all_model_classes: _lowerCamelCase 
: Any = True _lowerCamelCase : str = False _lowerCamelCase : List[str] = False _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Any = model(self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) ) _lowerCamelCase : List[str] = len(__lowerCAmelCase ) self.assertEqual(config.output_hidden_states ,__lowerCAmelCase ) check_encoder_attentions_output(__lowerCAmelCase ) if self.is_encoder_decoder: _lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : int = model(self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) ) self.assertEqual(config.output_hidden_states ,__lowerCAmelCase ) check_decoder_attentions_output(__lowerCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _lowerCamelCase : Tuple = True _lowerCamelCase : int = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model(self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) ) self.assertEqual(config.output_hidden_states ,__lowerCAmelCase ) check_encoder_attentions_output(__lowerCAmelCase ) # Check attention is always last and order is fine _lowerCamelCase : List[str] = True _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : List[str] = model(self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) ,len(__lowerCAmelCase ) ) self.assertEqual(model.config.output_hidden_states ,__lowerCAmelCase ) check_encoder_attentions_output(__lowerCAmelCase ) @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' pass def _lowercase ( self: Optional[Any] ): '''simple docstring''' pass def lowerCamelCase_( _lowerCamelCase ) -> Any: '''simple docstring''' return tf.constant(_lowerCamelCase , dtype=tf.intaa ) _lowerCAmelCase : str = 1e-4 @slow @require_tf class A_ ( unittest.TestCase ): def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase : int = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led # change to intended input here _lowerCamelCase : Optional[int] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _lowerCamelCase : Optional[int] = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _lowerCamelCase : Tuple = prepare_led_inputs_dict(model.config ,__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Any = model(**__lowerCAmelCase )[0] _lowerCamelCase : Union[str, Any] = (1, 1_024, 768) self.assertEqual(output.shape ,__lowerCAmelCase ) # change to expected output here _lowerCamelCase : str = tf.convert_to_tensor( [[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] ,) tf.debugging.assert_near(output[:, :3, :3] ,__lowerCAmelCase ,atol=1e-3 ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : List[str] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ) # change to intended input here _lowerCamelCase : int = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _lowerCamelCase : str = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _lowerCamelCase : Dict = prepare_led_inputs_dict(model.config ,__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Dict = model(**__lowerCAmelCase )[0] _lowerCamelCase : List[Any] = (1, 1_024, model.config.vocab_size) 
self.assertEqual(output.shape ,__lowerCAmelCase ) # change to expected output here _lowerCamelCase : Dict = tf.convert_to_tensor( [[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] ,) tf.debugging.assert_near(output[:, :3, :3] ,__lowerCAmelCase ,atol=1e-3 ,rtol=1e-3 )
340
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Optional[Any] = np.inf def set_batch_size(_lowerCamelCase ) -> None: nonlocal batch_size if isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Optional[int] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ) and feature.dtype == "binary": _lowerCamelCase : List[str] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_lowerCamelCase , _lowerCamelCase ) return None if batch_size is np.inf else batch_size class A_ ( _a ): def __init__( self: Optional[int] ,__lowerCAmelCase: NestedDataStructureLike[PathLike] ,__lowerCAmelCase: Optional[NamedSplit] = None ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: int ,): '''simple docstring''' super().__init__( __lowerCAmelCase ,split=__lowerCAmelCase ,features=__lowerCAmelCase ,cache_dir=__lowerCAmelCase ,keep_in_memory=__lowerCAmelCase ,streaming=__lowerCAmelCase ,num_proc=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : Tuple = path_or_paths if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else {self.split: path_or_paths} _lowerCamelCase : Any = _PACKAGED_DATASETS_MODULES["parquet"][1] _lowerCamelCase : int = Parquet( cache_dir=__lowerCAmelCase ,data_files=__lowerCAmelCase ,features=__lowerCAmelCase ,hash=__lowerCAmelCase ,**__lowerCAmelCase ,) def _lowercase ( self: Optional[int] ): '''simple docstring''' if self.streaming: _lowerCamelCase : List[Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _lowerCamelCase : Tuple = None _lowerCamelCase : Optional[int] = None _lowerCamelCase : List[str] = None _lowerCamelCase : str = None self.builder.download_and_prepare( download_config=__lowerCAmelCase ,download_mode=__lowerCAmelCase ,verification_mode=__lowerCAmelCase ,base_path=__lowerCAmelCase ,num_proc=self.num_proc ,) _lowerCamelCase : Any = self.builder.as_dataset( split=self.split ,verification_mode=__lowerCAmelCase ,in_memory=self.keep_in_memory ) return dataset class A_ : def __init__( self: str ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: Union[PathLike, BinaryIO] ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: List[Any] ,): '''simple docstring''' _lowerCamelCase : Any = dataset _lowerCamelCase : Any = path_or_buf _lowerCamelCase : Any = batch_size or get_writer_batch_size(dataset.features ) _lowerCamelCase : List[str] = parquet_writer_kwargs def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf ,(str, 
bytes, os.PathLike) ): with open(self.path_or_buf ,"wb+" ) as buffer: _lowerCamelCase : str = self._write(file_obj=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) else: _lowerCamelCase : Optional[int] = self._write(file_obj=self.path_or_buf ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) return written def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: BinaryIO ,__lowerCAmelCase: int ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : List[str] = 0 _lowerCamelCase : Optional[int] = parquet_writer_kwargs.pop("path_or_buf" ,__lowerCAmelCase ) _lowerCamelCase : List[str] = self.dataset.features.arrow_schema _lowerCamelCase : str = pq.ParquetWriter(__lowerCAmelCase ,schema=__lowerCAmelCase ,**__lowerCAmelCase ) for offset in logging.tqdm( range(0 ,len(self.dataset ) ,__lowerCAmelCase ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating parquet from Arrow format" ,): _lowerCamelCase : List[str] = query_table( table=self.dataset._data ,key=slice(__lowerCAmelCase ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,) writer.write_table(__lowerCAmelCase ) written += batch.nbytes writer.close() return written
340
1
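The `_write` loop in the parquet row above streams an Arrow table to disk in fixed-size batches through a single `ParquetWriter`. The same pattern in plain pyarrow, assuming a small in-memory table and an `out.parquet` destination chosen purely for illustration:

import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"text": ["a", "b", "c", "d"], "label": [0, 1, 0, 1]})
batch_size = 2

# Open one writer with the table's schema, then append slice by slice,
# mirroring the offset loop in `_write` above.
with pq.ParquetWriter("out.parquet", schema=table.schema) as writer:
    for offset in range(0, table.num_rows, batch_size):
        writer.write_table(table.slice(offset, batch_size))

print(pq.read_table("out.parquet").num_rows)  # 4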
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError("iterations must be defined as integers" ) if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not number >= 1: raise ValueError( "starting number must be\n and integer and be more than 0" ) if not iterations >= 1: raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" ) _lowerCamelCase : Tuple = "" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(_lowerCamelCase ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
340
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Optional[int] = {} _lowerCamelCase : Optional[int] = tokenizer(example["content"] , truncation=_lowerCamelCase )["input_ids"] _lowerCamelCase : Dict = len(example["content"] ) / len(output["input_ids"] ) return output _lowerCAmelCase : Tuple = HfArgumentParser(PretokenizationArguments) _lowerCAmelCase : Optional[int] = parser.parse_args() if args.num_workers is None: _lowerCAmelCase : Any = multiprocessing.cpu_count() _lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir) _lowerCAmelCase : Union[str, Any] = time.time() _lowerCAmelCase : Optional[int] = load_dataset(args.dataset_name, split='''train''') print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') _lowerCAmelCase : Any = time.time() _lowerCAmelCase : Dict = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ '''repo_name''', '''path''', '''copies''', '''size''', '''content''', '''license''', '''hash''', '''line_mean''', '''line_max''', '''alpha_frac''', '''autogenerated''', ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') _lowerCAmelCase : str = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
340
1
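The pretokenization script in the row above maps a tokenizer over a dataset with `num_proc` workers and records a characters-per-token ratio. A self-contained sketch of that pattern; the `gpt2` checkpoint and the tiny in-memory dataset are stand-ins for the script's CLI arguments:

from datasets import Dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")


def tokenize(example):
    input_ids = tokenizer(example["content"], truncation=True)["input_ids"]
    # Chars per token: a rough signal of how well the tokenizer fits the data.
    return {"input_ids": input_ids, "ratio_char_token": len(example["content"]) / len(input_ids)}


ds = Dataset.from_dict({"content": ["def add(a, b):\n    return a + b"]})
ds = ds.map(tokenize, num_proc=1, remove_columns=["content"])
print(round(ds[0]["ratio_char_token"], 2))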
"""simple docstring""" from __future__ import annotations import matplotlib.pyplot as plt # type: ignore import numpy # initial triangle of Koch snowflake _lowerCAmelCase : int = numpy.array([0, 0]) _lowerCAmelCase : Dict = numpy.array([0.5, 0.8_660_254]) _lowerCAmelCase : str = numpy.array([1, 0]) _lowerCAmelCase : Union[str, Any] = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[numpy.ndarray]: '''simple docstring''' _lowerCamelCase : Dict = initial_vectors for _ in range(_lowerCamelCase ): _lowerCamelCase : int = iteration_step(_lowerCamelCase ) return vectors def lowerCamelCase_( _lowerCamelCase ) -> list[numpy.ndarray]: '''simple docstring''' _lowerCamelCase : Union[str, Any] = [] for i, start_vector in enumerate(vectors[:-1] ): _lowerCamelCase : Union[str, Any] = vectors[i + 1] new_vectors.append(_lowerCamelCase ) _lowerCamelCase : Optional[int] = end_vector - start_vector new_vectors.append(start_vector + difference_vector / 3 ) new_vectors.append( start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) ) new_vectors.append(start_vector + difference_vector * 2 / 3 ) new_vectors.append(vectors[-1] ) return new_vectors def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> numpy.ndarray: '''simple docstring''' _lowerCamelCase : int = numpy.radians(_lowerCamelCase ) _lowerCamelCase, _lowerCamelCase : Optional[Any] = numpy.cos(_lowerCamelCase ), numpy.sin(_lowerCamelCase ) _lowerCamelCase : str = numpy.array(((c, -s), (s, c)) ) return numpy.dot(_lowerCamelCase , _lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase ) -> None: '''simple docstring''' _lowerCamelCase : Optional[Any] = plt.gca() axes.set_aspect("equal" ) # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all # y-coordinates as inputs, which are constructed from the vector-list using # zip() _lowerCamelCase, _lowerCamelCase : Union[str, Any] = zip(*_lowerCamelCase ) plt.plot(_lowerCamelCase , _lowerCamelCase ) plt.show() if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase : Any = iterate(INITIAL_VECTORS, 5) plot(processed_vectors)
340
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Optional[Any] = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
1
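The Koch-snowflake row above rotates each difference vector by 60 degrees with the standard 2-D rotation matrix. The core of that `rotate` helper, isolated and runnable on its own:

import numpy as np


def rotate(vector, angle_in_degrees):
    theta = np.radians(angle_in_degrees)
    c, s = np.cos(theta), np.sin(theta)
    rotation_matrix = np.array(((c, -s), (s, c)))  # [[cos t, -sin t], [sin t, cos t]]
    return np.dot(rotation_matrix, vector)


print(rotate(np.array([1.0, 0.0]), 90))  # ~[0. 1.]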
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : int = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class A_ ( _a ): lowerCAmelCase__ = 'mobilenet_v1' def __init__( self: Tuple ,__lowerCAmelCase: int=3 ,__lowerCAmelCase: Dict=224 ,__lowerCAmelCase: int=1.0 ,__lowerCAmelCase: Tuple=8 ,__lowerCAmelCase: List[str]="relu6" ,__lowerCAmelCase: int=True ,__lowerCAmelCase: List[Any]=0.9_99 ,__lowerCAmelCase: Optional[int]=0.02 ,__lowerCAmelCase: Optional[int]=0.0_01 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) _lowerCamelCase : List[str] = num_channels _lowerCamelCase : Union[str, Any] = image_size _lowerCamelCase : List[Any] = depth_multiplier _lowerCamelCase : Any = min_depth _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Dict = tf_padding _lowerCamelCase : Union[str, Any] = classifier_dropout_prob _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps class A_ ( _a ): lowerCAmelCase__ = version.parse('1.11' ) @property def _lowercase ( self: Optional[int] ): '''simple docstring''' return OrderedDict([("pixel_values", {0: "batch"})] ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def _lowercase ( self: Any ): '''simple docstring''' return 1e-4
340
"""simple docstring""" import logging from transformers.configuration_utils import PretrainedConfig _lowerCAmelCase : Optional[Any] = logging.getLogger(__name__) class A_ ( _a ): lowerCAmelCase__ = 'masked_bert' def __init__( self: Union[str, Any] ,__lowerCAmelCase: Dict=30_522 ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Dict=12 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: List[Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: str=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-12 ,__lowerCAmelCase: Union[str, Any]=0 ,__lowerCAmelCase: List[Any]="topK" ,__lowerCAmelCase: Optional[Any]="constant" ,__lowerCAmelCase: Optional[Any]=0.0 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = hidden_size _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : Tuple = num_attention_heads _lowerCamelCase : Optional[Any] = hidden_act _lowerCamelCase : Optional[Any] = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : str = max_position_embeddings _lowerCamelCase : List[str] = type_vocab_size _lowerCamelCase : Optional[int] = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : int = pruning_method _lowerCamelCase : str = mask_init _lowerCamelCase : List[Any] = mask_scale
340
1
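Both files in the row above follow the same configuration pattern: subclass `PretrainedConfig`, declare a `model_type`, forward shared arguments to `super().__init__`, and store the rest as attributes. A toy instance of the pattern; `toy_model` and its fields are illustrative, not a real architecture:

from transformers import PretrainedConfig


class ToyConfig(PretrainedConfig):
    model_type = "toy_model"

    def __init__(self, hidden_size=64, num_hidden_layers=2, layer_norm_eps=1e-12, **kwargs):
        super().__init__(**kwargs)  # handles pad/bos/eos ids and other shared fields
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.layer_norm_eps = layer_norm_eps


config = ToyConfig(num_hidden_layers=4)
print(config.num_hidden_layers)  # 4
print(config.model_type)  # toy_model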
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Dict = { '''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''', '''Salesforce/blip-vqa-capfit-large''': ( '''https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json''' ), '''Salesforce/blip-image-captioning-base''': ( '''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json''' ), '''Salesforce/blip-image-captioning-large''': ( '''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json''' ), '''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''', '''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''', '''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''', '''Salesforce/blip-itm-large-flikr''': ( '''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json''' ), } class A_ ( _a ): lowerCAmelCase__ = 'blip_text_model' def __init__( self: List[Any] ,__lowerCAmelCase: Union[str, Any]=30_524 ,__lowerCAmelCase: Any=768 ,__lowerCAmelCase: Union[str, Any]=768 ,__lowerCAmelCase: str=3_072 ,__lowerCAmelCase: Any=768 ,__lowerCAmelCase: Tuple=12 ,__lowerCAmelCase: Tuple=8 ,__lowerCAmelCase: List[Any]=512 ,__lowerCAmelCase: str="gelu" ,__lowerCAmelCase: Optional[Any]=1e-12 ,__lowerCAmelCase: Union[str, Any]=0.0 ,__lowerCAmelCase: List[str]=0.0 ,__lowerCAmelCase: Optional[Any]=0.02 ,__lowerCAmelCase: List[str]=30_522 ,__lowerCAmelCase: Optional[Any]=2 ,__lowerCAmelCase: List[str]=0 ,__lowerCAmelCase: Any=102 ,__lowerCAmelCase: int=True ,__lowerCAmelCase: int=True ,**__lowerCAmelCase: Dict ,): '''simple docstring''' super().__init__( pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,sep_token_id=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : int = vocab_size _lowerCamelCase : List[Any] = hidden_size _lowerCamelCase : Union[str, Any] = encoder_hidden_size _lowerCamelCase : int = intermediate_size _lowerCamelCase : str = projection_dim _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : str = num_hidden_layers _lowerCamelCase : Tuple = num_attention_heads _lowerCamelCase : List[str] = max_position_embeddings _lowerCamelCase : Any = layer_norm_eps _lowerCamelCase : str = hidden_act _lowerCamelCase : int = initializer_range _lowerCamelCase : Tuple = attention_probs_dropout_prob _lowerCamelCase : Union[str, Any] = is_decoder _lowerCamelCase : List[Any] = use_cache @classmethod def _lowercase ( cls: List[str] ,__lowerCAmelCase: Union[str, os.PathLike] ,**__lowerCAmelCase: Tuple ): '''simple docstring''' cls._set_token_in_kwargs(__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : Any = cls.get_config_dict(__lowerCAmelCase ,**__lowerCAmelCase ) # get the text config dict if we are loading from BlipConfig if config_dict.get("model_type" ) == "blip": _lowerCamelCase : Optional[Any] = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls ,"model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ 
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowerCAmelCase ,**__lowerCAmelCase ) class A_ ( _a ): lowerCAmelCase__ = 'blip_vision_model' def __init__( self: str ,__lowerCAmelCase: Dict=768 ,__lowerCAmelCase: str=3_072 ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: Tuple=12 ,__lowerCAmelCase: Dict=12 ,__lowerCAmelCase: Any=384 ,__lowerCAmelCase: int=16 ,__lowerCAmelCase: str="gelu" ,__lowerCAmelCase: str=1e-5 ,__lowerCAmelCase: int=0.0 ,__lowerCAmelCase: List[str]=1e-10 ,**__lowerCAmelCase: List[Any] ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ) _lowerCamelCase : Dict = hidden_size _lowerCamelCase : Any = intermediate_size _lowerCamelCase : int = projection_dim _lowerCamelCase : Any = num_hidden_layers _lowerCamelCase : List[str] = num_attention_heads _lowerCamelCase : int = patch_size _lowerCamelCase : List[str] = image_size _lowerCamelCase : int = initializer_range _lowerCamelCase : str = attention_dropout _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : str = hidden_act @classmethod def _lowercase ( cls: Any ,__lowerCAmelCase: Union[str, os.PathLike] ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' cls._set_token_in_kwargs(__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : Optional[int] = cls.get_config_dict(__lowerCAmelCase ,**__lowerCAmelCase ) # get the vision config dict if we are loading from BlipConfig if config_dict.get("model_type" ) == "blip": _lowerCamelCase : Optional[Any] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls ,"model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowerCAmelCase ,**__lowerCAmelCase ) class A_ ( _a ): lowerCAmelCase__ = 'blip' lowerCAmelCase__ = True def __init__( self: Any ,__lowerCAmelCase: Any=None ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: int=512 ,__lowerCAmelCase: List[Any]=2.65_92 ,__lowerCAmelCase: int=256 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ) if text_config is None: _lowerCamelCase : str = {} logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." ) if vision_config is None: _lowerCamelCase : Tuple = {} logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." 
) _lowerCamelCase : Union[str, Any] = BlipTextConfig(**__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = BlipVisionConfig(**__lowerCAmelCase ) _lowerCamelCase : str = self.vision_config.hidden_size _lowerCamelCase : List[Any] = projection_dim _lowerCamelCase : List[Any] = logit_scale_init_value _lowerCamelCase : List[str] = 1.0 _lowerCamelCase : Optional[Any] = 0.02 _lowerCamelCase : Optional[int] = image_text_hidden_size @classmethod def _lowercase ( cls: Union[str, Any] ,__lowerCAmelCase: BlipTextConfig ,__lowerCAmelCase: BlipVisionConfig ,**__lowerCAmelCase: Any ): '''simple docstring''' return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**__lowerCAmelCase ) def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : str = copy.deepcopy(self.__dict__ ) _lowerCamelCase : Tuple = self.text_config.to_dict() _lowerCamelCase : Any = self.vision_config.to_dict() _lowerCamelCase : List[Any] = self.__class__.model_type return output
340
"""simple docstring""" import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model _lowerCAmelCase : str = '''0.12''' # assumed parallelism: 8 if is_torch_available(): import torch def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) -> List[Any]: '''simple docstring''' if rng is None: _lowerCamelCase : Union[str, Any] = random.Random() _lowerCamelCase : Union[str, Any] = 1 for dim in shape: total_dims *= dim _lowerCamelCase : Optional[int] = [] for _ in range(_lowerCamelCase ): values.append(rng.randint(0 , vocab_size - 1 ) ) _lowerCamelCase : Union[str, Any] = np.array(_lowerCamelCase , dtype=jnp.intaa ).reshape(_lowerCamelCase ) return output def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Optional[int] = ids_tensor(_lowerCamelCase , vocab_size=2 , rng=_lowerCamelCase ) # make sure that at least one token is attended to for each batch _lowerCamelCase : List[str] = 1 return attn_mask @require_flax class A_ : lowerCAmelCase__ = None lowerCAmelCase__ = () def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 _lowerCamelCase : List[str] = 2 _lowerCamelCase : str = inputs["input_ids"].shape[-1] // 2 _lowerCamelCase : Tuple = inputs["input_ids"][:max_batch_size, :sequence_length] _lowerCamelCase : Any = jnp.ones_like(__lowerCAmelCase ) _lowerCamelCase : List[Any] = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens _lowerCamelCase : Optional[Any] = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` _lowerCamelCase : List[str] = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = False _lowerCamelCase : Dict = max_length _lowerCamelCase : Tuple = 0 for model_class in self.all_generative_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning _lowerCamelCase : Any = getattr(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Dict = pt_model_class(__lowerCAmelCase ).eval() _lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCAmelCase ,flax_model.params ) _lowerCamelCase : int = flax_model.generate(__lowerCAmelCase ).sequences _lowerCamelCase : Optional[int] = pt_model.generate(torch.tensor(__lowerCAmelCase ,dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: _lowerCamelCase : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, 
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = self._get_input_ids_and_config() _lowerCamelCase : Union[str, Any] = False _lowerCamelCase : Union[str, Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = True _lowerCamelCase : Optional[int] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : int = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : int = False _lowerCamelCase : Optional[Any] = max_length _lowerCamelCase : Dict = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[str] = model_class(__lowerCAmelCase ) _lowerCamelCase : Dict = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = self._get_input_ids_and_config() _lowerCamelCase : Tuple = False _lowerCamelCase : Union[str, Any] = max_length _lowerCamelCase : List[str] = 2 _lowerCamelCase : Optional[int] = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : str = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() _lowerCamelCase : int = True _lowerCamelCase : List[Any] = max_length _lowerCamelCase : Optional[Any] = 0.8 _lowerCamelCase : Union[str, Any] = 10 _lowerCamelCase : List[str] = 0.3 _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : str = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : int = jit(model.generate ) _lowerCamelCase : Optional[int] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() 
,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[str] = max_length _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : Dict = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Any = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : Any = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() _lowerCamelCase : Dict = max_length _lowerCamelCase : List[Any] = 2 _lowerCamelCase : Tuple = 1 _lowerCamelCase : List[str] = 8 _lowerCamelCase : List[Any] = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : int = model_class(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : Optional[Any] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Tuple = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : Dict = False _lowerCamelCase : Any = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[Any] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : List[str] = 
attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : int = 2 _lowerCamelCase : int = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : Dict = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) @require_flax class A_ ( unittest.TestCase ): def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" ) _lowerCamelCase : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) _lowerCamelCase : Optional[Any] = "Hello world" _lowerCamelCase : str = tokenizer(__lowerCAmelCase ,return_tensors="np" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__lowerCAmelCase ,"do_samples" ): model.generate(__lowerCAmelCase ,do_samples=__lowerCAmelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__lowerCAmelCase ,"foo" ): _lowerCamelCase : List[str] = {"foo": "bar"} model.generate(__lowerCAmelCase ,**__lowerCAmelCase )
340
1
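A recurring assertion in the Flax generation tests above is that `jit(model.generate)` produces the same sequences as the eager call. The same compiled-versus-eager check in miniature on a pure function (the `step` function is a stand-in for a deterministic generation step):

import jax
import jax.numpy as jnp


def step(x):
    # Stand-in for a deterministic generation step.
    return jnp.cumsum(x) % 7


x = jnp.arange(6)
eager_out = step(x)
jit_out = jax.jit(step)(x)
assert eager_out.tolist() == jit_out.tolist()
print(jit_out)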
"""simple docstring""" # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401 deprecate( '''stable diffusion controlnet''', '''0.22.0''', '''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''', standard_warn=False, stacklevel=3, )
340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : int = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class A_ ( _a ): lowerCAmelCase__ = 'mobilenet_v1' def __init__( self: Tuple ,__lowerCAmelCase: int=3 ,__lowerCAmelCase: Dict=224 ,__lowerCAmelCase: int=1.0 ,__lowerCAmelCase: Tuple=8 ,__lowerCAmelCase: List[str]="relu6" ,__lowerCAmelCase: int=True ,__lowerCAmelCase: List[Any]=0.9_99 ,__lowerCAmelCase: Optional[int]=0.02 ,__lowerCAmelCase: Optional[int]=0.0_01 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) _lowerCamelCase : List[str] = num_channels _lowerCamelCase : Union[str, Any] = image_size _lowerCamelCase : List[Any] = depth_multiplier _lowerCamelCase : Any = min_depth _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Dict = tf_padding _lowerCamelCase : Union[str, Any] = classifier_dropout_prob _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps class A_ ( _a ): lowerCAmelCase__ = version.parse('1.11' ) @property def _lowercase ( self: Optional[int] ): '''simple docstring''' return OrderedDict([("pixel_values", {0: "batch"})] ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def _lowercase ( self: Any ): '''simple docstring''' return 1e-4
340
1
"""simple docstring""" import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) _lowerCAmelCase : Optional[int] = [ '''cross_validation.py''', '''gradient_accumulation.py''', '''local_sgd.py''', '''multi_process_metrics.py''', '''memory.py''', '''automatic_gradient_accumulation.py''', '''fsdp_with_peak_mem_tracking.py''', '''deepspeed_with_config_support.py''', '''megatron_lm_gpt_pretraining.py''', ] class A_ ( unittest.TestCase ): def _lowercase ( self: str ,__lowerCAmelCase: str ,__lowerCAmelCase: bool ,__lowerCAmelCase: str = None ,__lowerCAmelCase: list = None ): '''simple docstring''' _lowerCamelCase : Dict = None _lowerCamelCase : Dict = os.path.abspath(os.path.join("examples" ,"by_feature" ) ) _lowerCamelCase : str = os.path.abspath("examples" ) for item in os.listdir(__lowerCAmelCase ): if item not in EXCLUDE_EXAMPLES: _lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,__lowerCAmelCase ) if os.path.isfile(__lowerCAmelCase ) and ".py" in item_path: with self.subTest( tested_script=__lowerCAmelCase ,feature_script=__lowerCAmelCase ,tested_section="main()" if parser_only else "training_function()" ,): _lowerCamelCase : str = compare_against_test( os.path.join(__lowerCAmelCase ,__lowerCAmelCase ) ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : List[Any] = "\n".join(__lowerCAmelCase ) if special_strings is not None: for string in special_strings: _lowerCamelCase : Any = diff.replace(__lowerCAmelCase ,"" ) self.assertEqual(__lowerCAmelCase ,"" ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' self.one_complete_example("complete_nlp_example.py" ,__lowerCAmelCase ) self.one_complete_example("complete_nlp_example.py" ,__lowerCAmelCase ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : Optional[int] = os.path.abspath(os.path.join("examples" ,"cv_example.py" ) ) _lowerCamelCase : Any = [ " " * 16 + "{\n\n", " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n", " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n", " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n", " " * 20 + "\"epoch\": epoch,\n\n", " " * 16 + "},\n\n", " " * 16 + "step=epoch,\n", " " * 12, " " * 8 + "for step, batch in enumerate(active_dataloader):\n", ] self.one_complete_example("complete_cv_example.py" ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) self.one_complete_example("complete_cv_example.py" ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) @mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} ) class A_ ( _a ): lowerCAmelCase__ = False @classmethod def _lowercase ( cls: Optional[int] ): '''simple docstring''' super().setUpClass() _lowerCamelCase : Optional[int] = tempfile.mkdtemp() _lowerCamelCase : Optional[int] = os.path.join(cls._tmpdir ,"default_config.yml" ) write_basic_config(save_location=cls.configPath ) _lowerCamelCase : List[Any] = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def _lowercase ( cls: Tuple ): '''simple docstring''' super().tearDownClass() shutil.rmtree(cls._tmpdir ) def _lowercase ( self: 
Optional[int] ): '''simple docstring''' _lowerCamelCase : Optional[int] = F""" examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir ,"epoch_0" ) ) ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : Any = F""" examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} """.split() _lowerCamelCase : Optional[int] = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir ,"step_2" ) ) ) def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : Optional[int] = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir ,'epoch_0' )} """.split() _lowerCamelCase : Dict = run_command(self._launch_args + testargs ,return_stdout=__lowerCAmelCase ) self.assertNotIn("epoch 0:" ,__lowerCAmelCase ) self.assertIn("epoch 1:" ,__lowerCAmelCase ) def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase : List[Any] = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir ,'step_2' )} """.split() _lowerCamelCase : Dict = run_command(self._launch_args + testargs ,return_stdout=__lowerCAmelCase ) if torch.cuda.is_available(): _lowerCamelCase : List[Any] = torch.cuda.device_count() else: _lowerCamelCase : Optional[int] = 1 if num_processes > 1: self.assertNotIn("epoch 0:" ,__lowerCAmelCase ) self.assertIn("epoch 1:" ,__lowerCAmelCase ) else: self.assertIn("epoch 0:" ,__lowerCAmelCase ) self.assertIn("epoch 1:" ,__lowerCAmelCase ) @slow def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : Tuple = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split() with mock.patch.dict(os.environ ,{"TESTING_MOCKED_DATALOADERS": "0"} ): _lowerCamelCase : Any = run_command(self._launch_args + testargs ,return_stdout=__lowerCAmelCase ) _lowerCamelCase : Any = re.findall("({.+})" ,__lowerCAmelCase ) _lowerCamelCase : str = [r for r in results if "accuracy" in r][-1] _lowerCamelCase : Union[str, Any] = ast.literal_eval(__lowerCAmelCase ) self.assertGreaterEqual(results["accuracy"] ,0.75 ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : List[Any] = ["examples/by_feature/multi_process_metrics.py"] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ ,{"WANDB_MODE": "offline"} ) def _lowercase ( self: int ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: _lowerCamelCase : int = F""" examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase ,"tracking" ) ) ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = ["examples/by_feature/gradient_accumulation.py"] run_command(self._launch_args + testargs ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Optional[int] = ["examples/by_feature/local_sgd.py"] run_command(self._launch_args + testargs )
340
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def lowerCamelCase_( _lowerCamelCase ) -> Any: '''simple docstring''' for param in module.parameters(): _lowerCamelCase : Optional[int] = False def lowerCamelCase_( ) -> List[str]: '''simple docstring''' _lowerCamelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _lowerCamelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Dict = plt.imshow(_lowerCamelCase ) fig.axes.get_xaxis().set_visible(_lowerCamelCase ) fig.axes.get_yaxis().set_visible(_lowerCamelCase ) plt.show() def lowerCamelCase_( ) -> str: '''simple docstring''' _lowerCamelCase : Tuple = datetime.now() _lowerCamelCase : Tuple = current_time.strftime("%H:%M:%S" ) return timestamp
340
1
"""simple docstring""" import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : List[Any] = logging.get_logger(__name__) def lowerCamelCase_( _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : List[str] = SwinConfig.from_pretrained( "microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] ) _lowerCamelCase : Union[str, Any] = MaskFormerConfig(backbone_config=_lowerCamelCase ) _lowerCamelCase : List[Any] = "huggingface/label-files" if "ade20k-full" in model_name: # this should be ok _lowerCamelCase : Union[str, Any] = 847 _lowerCamelCase : Union[str, Any] = "maskformer-ade20k-full-id2label.json" elif "ade" in model_name: # this should be ok _lowerCamelCase : List[Any] = 150 _lowerCamelCase : Dict = "ade20k-id2label.json" elif "coco-stuff" in model_name: # this should be ok _lowerCamelCase : int = 171 _lowerCamelCase : List[Any] = "maskformer-coco-stuff-id2label.json" elif "coco" in model_name: # TODO _lowerCamelCase : Dict = 133 _lowerCamelCase : Any = "coco-panoptic-id2label.json" elif "cityscapes" in model_name: # this should be ok _lowerCamelCase : Any = 19 _lowerCamelCase : Dict = "cityscapes-id2label.json" elif "vistas" in model_name: # this should be ok _lowerCamelCase : int = 65 _lowerCamelCase : Optional[Any] = "mapillary-vistas-id2label.json" _lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowerCamelCase : Dict = {int(_lowerCamelCase ): v for k, v in idalabel.items()} return config def lowerCamelCase_( _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : int = [] # stem # fmt: off rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") ) rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") ) rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", 
F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") ) rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") ) # FPN rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") ) rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") ) rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") ) rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") ) rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") ) rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") ) 
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") ) # cross-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") ) # MLP 1 rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") ) # MLP 2 rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") ) # layernorm 1 (self-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") ) # layernorm 3 (final layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") ) rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") ) rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") ) # heads on top rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") ) rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") ) rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") ) 
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") ) rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") ) for i in range(3 ): rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") ) # fmt: on return rename_keys def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Tuple = dct.pop(_lowerCamelCase ) _lowerCamelCase : Optional[int] = val def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : List[str] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _lowerCamelCase : Any = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _lowerCamelCase : str = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" ) _lowerCamelCase : Any = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict _lowerCamelCase : Optional[Any] = in_proj_weight[:dim, :] _lowerCamelCase : Union[str, Any] = in_proj_bias[: dim] _lowerCamelCase : List[str] = in_proj_weight[ dim : dim * 2, : ] _lowerCamelCase : Union[str, Any] = in_proj_bias[ dim : dim * 2 ] _lowerCamelCase : List[str] = in_proj_weight[ -dim :, : ] _lowerCamelCase : Union[str, Any] = in_proj_bias[-dim :] # fmt: on def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Optional[int] = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) _lowerCamelCase : List[str] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" ) _lowerCamelCase : Dict = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _lowerCamelCase : Union[str, Any] = in_proj_weight[: hidden_size, :] _lowerCamelCase : Tuple = in_proj_bias[:config.hidden_size] _lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :] _lowerCamelCase : Any = in_proj_bias[hidden_size : hidden_size * 2] _lowerCamelCase : str = in_proj_weight[-hidden_size :, :] _lowerCamelCase : List[str] = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) _lowerCamelCase : List[str] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" ) _lowerCamelCase : Union[str, Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _lowerCamelCase : Any = in_proj_weight[: hidden_size, :] _lowerCamelCase : str = in_proj_bias[:config.hidden_size] _lowerCamelCase : Union[str, Any] = in_proj_weight[hidden_size : hidden_size * 2, :] _lowerCamelCase : Union[str, Any] = 
in_proj_bias[hidden_size : hidden_size * 2] _lowerCamelCase : Any = in_proj_weight[-hidden_size :, :] _lowerCamelCase : str = in_proj_bias[-hidden_size :] # fmt: on def lowerCamelCase_( ) -> torch.Tensor: '''simple docstring''' _lowerCamelCase : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCamelCase : int = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return im @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Tuple = get_maskformer_config(_lowerCamelCase ) # load original state_dict with open(_lowerCamelCase , "rb" ) as f: _lowerCamelCase : int = pickle.load(_lowerCamelCase ) _lowerCamelCase : Optional[Any] = data["model"] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys _lowerCamelCase : Optional[Any] = create_rename_keys(_lowerCamelCase ) for src, dest in rename_keys: rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) read_in_swin_q_k_v(_lowerCamelCase , config.backbone_config ) read_in_decoder_q_k_v(_lowerCamelCase , _lowerCamelCase ) # update to torch tensors for key, value in state_dict.items(): _lowerCamelCase : int = torch.from_numpy(_lowerCamelCase ) # load 🤗 model _lowerCamelCase : List[str] = MaskFormerForInstanceSegmentation(_lowerCamelCase ) model.eval() for name, param in model.named_parameters(): print(_lowerCamelCase , param.shape ) _lowerCamelCase, _lowerCamelCase : Any = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(_lowerCamelCase ) == 0, F"""Unexpected keys: {unexpected_keys}""" # verify results _lowerCamelCase : Dict = prepare_img() if "vistas" in model_name: _lowerCamelCase : str = 65 elif "cityscapes" in model_name: _lowerCamelCase : Tuple = 65535 else: _lowerCamelCase : Dict = 255 _lowerCamelCase : List[Any] = True if "ade" in model_name else False _lowerCamelCase : Any = MaskFormerImageProcessor(ignore_index=_lowerCamelCase , reduce_labels=_lowerCamelCase ) _lowerCamelCase : Optional[int] = image_processor(_lowerCamelCase , return_tensors="pt" ) _lowerCamelCase : Optional[Any] = model(**_lowerCamelCase ) print("Logits:" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": _lowerCamelCase : Tuple = torch.tensor( [[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) image_processor.save_pretrained(_lowerCamelCase ) if push_to_hub: print("Pushing model and image processor to the hub..." 
) model.push_to_hub(F"""nielsr/{model_name}""" ) image_processor.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": _lowerCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''maskformer-swin-tiny-ade''', type=str, help=('''Name of the MaskFormer model you\'d like to convert''',), ) parser.add_argument( '''--checkpoint_path''', default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''', type=str, help='''Path to the original state dict (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _lowerCAmelCase : int = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
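A hedged sketch of invoking the conversion entry point above without the CLI; convert_maskformer_checkpoint is the name used at the call site in __main__ (the def itself was mangled), the checkpoint and output paths are placeholders, and the call only works with the original pickled MaskFormer weights on disk:

# Sketch only: positional arguments mirror the argparse wiring in __main__ above.
convert_maskformer_checkpoint(
    "maskformer-swin-tiny-ade",       # model_name
    "/path/to/model.pkl",             # checkpoint_path (placeholder)
    "/tmp/maskformer-swin-tiny-ade",  # pytorch_dump_folder_path (placeholder)
    False,                            # push_to_hub
)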
340
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = ( first_str_length if first_str_length > second_str_length else second_str_length ) _lowerCamelCase : list = [] for char_count in range(_lowerCamelCase ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(_lowerCamelCase ) if __name__ == "__main__": print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
340
1
"""simple docstring""" import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def lowerCamelCase_( _lowerCamelCase = 3 ) -> qiskit.result.counts.Counts: '''simple docstring''' if isinstance(_lowerCamelCase , _lowerCamelCase ): raise TypeError("number of qubits must be a integer." ) if number_of_qubits <= 0: raise ValueError("number of qubits must be > 0." ) if math.floor(_lowerCamelCase ) != number_of_qubits: raise ValueError("number of qubits must be exact integer." ) if number_of_qubits > 10: raise ValueError("number of qubits too large to simulate(>10)." ) _lowerCamelCase : str = QuantumRegister(_lowerCamelCase , "qr" ) _lowerCamelCase : Optional[Any] = ClassicalRegister(_lowerCamelCase , "cr" ) _lowerCamelCase : Optional[int] = QuantumCircuit(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : List[Any] = number_of_qubits for i in range(_lowerCamelCase ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(_lowerCamelCase ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , _lowerCamelCase , _lowerCamelCase ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(_lowerCamelCase , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(_lowerCamelCase , _lowerCamelCase ) # simulate with 10000 shots _lowerCamelCase : Optional[int] = Aer.get_backend("qasm_simulator" ) _lowerCamelCase : Optional[Any] = execute(_lowerCamelCase , _lowerCamelCase , shots=10000 ) return job.result().get_counts(_lowerCamelCase ) if __name__ == "__main__": print( f'''Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}''' )
340
"""simple docstring""" _lowerCAmelCase : Tuple = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Any = [False] * len(_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = [s] _lowerCamelCase : str = True while queue: _lowerCamelCase : Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_lowerCamelCase ) _lowerCamelCase : Any = True _lowerCamelCase : Any = u return visited[t] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : List[str] = [-1] * (len(_lowerCamelCase )) _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : List[str] = [i[:] for i in graph] # Record original cut, copy. while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Any = float("Inf" ) _lowerCamelCase : Dict = sink while s != source: # Find the minimum value in select path _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , graph[parent[s]][s] ) _lowerCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _lowerCamelCase : Optional[Any] = sink while v != source: _lowerCamelCase : Union[str, Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _lowerCamelCase : List[str] = parent[v] for i in range(len(_lowerCamelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
340
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A_ ( _a , unittest.TestCase ): lowerCAmelCase__ = KandinskyInpaintPipeline lowerCAmelCase__ = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image'] lowerCAmelCase__ = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] lowerCAmelCase__ = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] lowerCAmelCase__ = False @property def _lowercase ( self: Dict ): '''simple docstring''' return 32 @property def _lowercase ( self: Any ): '''simple docstring''' return 32 @property def _lowercase ( self: Tuple ): '''simple docstring''' return self.time_input_dim @property def _lowercase ( self: Dict ): '''simple docstring''' return self.time_input_dim * 4 @property def _lowercase ( self: int ): '''simple docstring''' return 100 @property def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : Optional[int] = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def _lowercase ( self: Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) _lowerCamelCase : List[str] = MCLIPConfig( numDims=self.cross_attention_dim ,transformerDimensions=self.text_embedder_hidden_size ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=1_005 ,) _lowerCamelCase : Optional[Any] = MultilingualCLIP(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = text_encoder.eval() return text_encoder @property def _lowercase ( self: Dict ): '''simple docstring''' torch.manual_seed(0 ) _lowerCamelCase : str = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } _lowerCamelCase : List[str] = UNetaDConditionModel(**__lowerCAmelCase ) return model @property def _lowercase ( self: Optional[int] ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ 
"AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _lowercase ( self: Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) _lowerCamelCase : List[str] = VQModel(**self.dummy_movq_kwargs ) return model def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Tuple = self.dummy_text_encoder _lowerCamelCase : Dict = self.dummy_tokenizer _lowerCamelCase : int = self.dummy_unet _lowerCamelCase : Dict = self.dummy_movq _lowerCamelCase : Optional[Any] = DDIMScheduler( num_train_timesteps=1_000 ,beta_schedule="linear" ,beta_start=0.0_00_85 ,beta_end=0.0_12 ,clip_sample=__lowerCAmelCase ,set_alpha_to_one=__lowerCAmelCase ,steps_offset=1 ,prediction_type="epsilon" ,thresholding=__lowerCAmelCase ,) _lowerCamelCase : int = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def _lowercase ( self: List[str] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Any=0 ): '''simple docstring''' _lowerCamelCase : List[str] = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(seed + 1 ) ).to(__lowerCAmelCase ) # create init_image _lowerCamelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) _lowerCamelCase : Any = image.cpu().permute(0 ,2 ,3 ,1 )[0] _lowerCamelCase : Optional[Any] = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("RGB" ).resize((256, 256) ) # create mask _lowerCamelCase : Optional[Any] = np.ones((64, 64) ,dtype=np.floataa ) _lowerCamelCase : Optional[int] = 0 if str(__lowerCAmelCase ).startswith("mps" ): _lowerCamelCase : Optional[int] = torch.manual_seed(__lowerCAmelCase ) else: _lowerCamelCase : Tuple = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = { "prompt": "horse", "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : List[Any] = "cpu" _lowerCamelCase : Union[str, Any] = self.get_dummy_components() _lowerCamelCase : str = self.pipeline_class(**__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : List[Any] = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) ) _lowerCamelCase : Optional[int] = output.images _lowerCamelCase : List[Any] = pipe( **self.get_dummy_inputs(__lowerCAmelCase ) ,return_dict=__lowerCAmelCase ,)[0] _lowerCamelCase : Tuple = image[0, -3:, -3:, -1] _lowerCamelCase : str = image_from_tuple[0, -3:, -3:, -1] print(F"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) _lowerCamelCase : Tuple = np.array( [0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def _lowercase ( self: Tuple 
): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class A_ ( unittest.TestCase ): def _lowercase ( self: int ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" ) _lowerCamelCase : List[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) _lowerCamelCase : Optional[int] = np.ones((768, 768) ,dtype=np.floataa ) _lowerCamelCase : List[str] = 0 _lowerCamelCase : Union[str, Any] = "a hat" _lowerCamelCase : List[str] = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior" ,torch_dtype=torch.floataa ) pipe_prior.to(__lowerCAmelCase ) _lowerCamelCase : Dict = KandinskyInpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-inpaint" ,torch_dtype=torch.floataa ) _lowerCamelCase : Optional[Any] = pipeline.to(__lowerCAmelCase ) pipeline.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Dict = torch.Generator(device="cpu" ).manual_seed(0 ) _lowerCamelCase, _lowerCamelCase : Dict = pipe_prior( __lowerCAmelCase ,generator=__lowerCAmelCase ,num_inference_steps=5 ,negative_prompt="" ,).to_tuple() _lowerCamelCase : Optional[Any] = pipeline( __lowerCAmelCase ,image=__lowerCAmelCase ,mask_image=__lowerCAmelCase ,image_embeds=__lowerCAmelCase ,negative_image_embeds=__lowerCAmelCase ,generator=__lowerCAmelCase ,num_inference_steps=100 ,height=768 ,width=768 ,output_type="np" ,) _lowerCamelCase : Optional[Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCAmelCase ,__lowerCAmelCase )
340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''', '''umberto-commoncrawl-cased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json''' ), '''umberto-wikipedia-uncased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json''' ), } class A_ ( _a ): lowerCAmelCase__ = 'camembert' def __init__( self: Tuple ,__lowerCAmelCase: Union[str, Any]=30_522 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Union[str, Any]=12 ,__lowerCAmelCase: int=12 ,__lowerCAmelCase: Optional[int]=3_072 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: int=512 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Any="absolute" ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: Dict ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[str] = vocab_size _lowerCamelCase : Any = hidden_size _lowerCamelCase : Union[str, Any] = num_hidden_layers _lowerCamelCase : str = num_attention_heads _lowerCamelCase : List[Any] = hidden_act _lowerCamelCase : int = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : str = type_vocab_size _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Union[str, Any] = layer_norm_eps _lowerCamelCase : Tuple = position_embedding_type _lowerCamelCase : List[Any] = use_cache _lowerCamelCase : Dict = classifier_dropout class A_ ( _a ): @property def _lowercase ( self: Any ): '''simple docstring''' if self.task == "multiple-choice": _lowerCamelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"} else: _lowerCamelCase : int = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
340
1
"""simple docstring""" import os import sys import unittest _lowerCAmelCase : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path _lowerCAmelCase : List[Any] = os.path.join(git_repo_path, '''src''', '''diffusers''') class A_ ( unittest.TestCase ): def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase : List[str] = find_backend(" if not is_torch_available():" ) self.assertEqual(__lowerCAmelCase ,"torch" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _lowerCamelCase : Optional[Any] = find_backend(" if not (is_torch_available() and is_transformers_available()):" ) self.assertEqual(__lowerCAmelCase ,"torch_and_transformers" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _lowerCamelCase : Any = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" ) self.assertEqual(__lowerCAmelCase ,"torch_and_transformers_and_onnx" ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Optional[Any] = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" ,__lowerCAmelCase ) self.assertIn("torch_and_transformers" ,__lowerCAmelCase ) self.assertIn("flax_and_transformers" ,__lowerCAmelCase ) self.assertIn("torch_and_transformers_and_onnx" ,__lowerCAmelCase ) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" ,objects["torch"] ) self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] ) self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] ) self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] ) self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] ) self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Optional[int] = create_dummy_object("CONSTANT" ,"'torch'" ) self.assertEqual(__lowerCAmelCase ,"\nCONSTANT = None\n" ) _lowerCamelCase : Tuple = create_dummy_object("function" ,"'torch'" ) self.assertEqual( __lowerCAmelCase ,"\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) _lowerCamelCase : Dict = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" _lowerCamelCase : Optional[int] = create_dummy_object("FakeClass" ,"'torch'" ) self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : Optional[int] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, 
[\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" _lowerCamelCase : List[str] = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} ) self.assertEqual(dummy_files["torch"] ,__lowerCAmelCase )
340
"""simple docstring""" from collections import defaultdict def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : str = True for v in tree[start]: if v not in visited: ret += dfs(_lowerCamelCase ) if ret % 2 == 0: cuts.append(_lowerCamelCase ) return ret def lowerCamelCase_( ) -> int: '''simple docstring''' dfs(1 ) if __name__ == "__main__": _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = 10, 9 _lowerCAmelCase : str = defaultdict(list) _lowerCAmelCase : dict[int, bool] = {} _lowerCAmelCase : list[int] = [] _lowerCAmelCase : Any = 0 _lowerCAmelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
340
1
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) _lowerCAmelCase : Any = { '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''', # See all DETR models at https://huggingface.co/models?filter=detr } class A_ ( _a ): lowerCAmelCase__ = 'detr' lowerCAmelCase__ = ['past_key_values'] lowerCAmelCase__ = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self: Optional[Any] ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: Dict=3 ,__lowerCAmelCase: Dict=100 ,__lowerCAmelCase: str=6 ,__lowerCAmelCase: Any=2_048 ,__lowerCAmelCase: Dict=8 ,__lowerCAmelCase: int=6 ,__lowerCAmelCase: Union[str, Any]=2_048 ,__lowerCAmelCase: int=8 ,__lowerCAmelCase: Dict=0.0 ,__lowerCAmelCase: List[str]=0.0 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: Union[str, Any]="relu" ,__lowerCAmelCase: List[Any]=256 ,__lowerCAmelCase: Any=0.1 ,__lowerCAmelCase: Tuple=0.0 ,__lowerCAmelCase: str=0.0 ,__lowerCAmelCase: List[Any]=0.02 ,__lowerCAmelCase: Any=1.0 ,__lowerCAmelCase: Optional[Any]=False ,__lowerCAmelCase: List[str]="sine" ,__lowerCAmelCase: List[Any]="resnet50" ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: List[str]=False ,__lowerCAmelCase: Tuple=1 ,__lowerCAmelCase: Any=5 ,__lowerCAmelCase: Tuple=2 ,__lowerCAmelCase: Optional[Any]=1 ,__lowerCAmelCase: str=1 ,__lowerCAmelCase: Any=5 ,__lowerCAmelCase: List[Any]=2 ,__lowerCAmelCase: str=0.1 ,**__lowerCAmelCase: str ,): '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) _lowerCamelCase : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : List[Any] = backbone_config.get("model_type" ) _lowerCamelCase : str = CONFIG_MAPPING[backbone_model_type] _lowerCamelCase : List[Any] = config_class.from_dict(__lowerCAmelCase ) # set timm attributes to None _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = None, None, None _lowerCamelCase : str = use_timm_backbone _lowerCamelCase : str = backbone_config _lowerCamelCase : Dict = num_channels _lowerCamelCase : str = num_queries _lowerCamelCase : Optional[Any] = d_model _lowerCamelCase : Any = encoder_ffn_dim _lowerCamelCase : List[Any] = encoder_layers _lowerCamelCase : List[str] = encoder_attention_heads _lowerCamelCase : Union[str, Any] = decoder_ffn_dim _lowerCamelCase : Tuple = decoder_layers _lowerCamelCase : Tuple = decoder_attention_heads _lowerCamelCase : List[str] = dropout _lowerCamelCase : List[str] = attention_dropout _lowerCamelCase : str = activation_dropout _lowerCamelCase : Tuple = activation_function _lowerCamelCase : Optional[int] = init_std _lowerCamelCase : Optional[Any] = init_xavier_std _lowerCamelCase : Dict = encoder_layerdrop _lowerCamelCase : Optional[Any] = decoder_layerdrop _lowerCamelCase : Optional[int] = encoder_layers _lowerCamelCase : Tuple = auxiliary_loss _lowerCamelCase : Optional[Any] = position_embedding_type _lowerCamelCase : Dict = backbone _lowerCamelCase : Tuple = use_pretrained_backbone _lowerCamelCase : Union[str, Any] = dilation # Hungarian matcher _lowerCamelCase : List[str] = class_cost _lowerCamelCase : List[str] = bbox_cost _lowerCamelCase : Dict = giou_cost # Loss coefficients _lowerCamelCase : Tuple = mask_loss_coefficient _lowerCamelCase : Dict = dice_loss_coefficient _lowerCamelCase : Tuple = bbox_loss_coefficient _lowerCamelCase : List[Any] = giou_loss_coefficient _lowerCamelCase : int = eos_coefficient super().__init__(is_encoder_decoder=__lowerCAmelCase ,**__lowerCAmelCase ) @property def _lowercase ( self: List[str] ): '''simple docstring''' return self.encoder_attention_heads @property def _lowercase ( self: Optional[int] ): '''simple docstring''' return self.d_model @classmethod def _lowercase ( cls: Optional[int] ,__lowerCAmelCase: PretrainedConfig ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' return cls(backbone_config=__lowerCAmelCase ,**__lowerCAmelCase ) def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Any = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: _lowerCamelCase : Tuple = self.backbone_config.to_dict() _lowerCamelCase : Union[str, Any] = self.__class__.model_type return output class A_ ( _a ): lowerCAmelCase__ = version.parse('1.11' ) @property def _lowercase ( self: Any ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _lowercase ( self: List[Any] ): '''simple docstring''' return 1e-5 @property def _lowercase ( self: Union[str, Any] ): '''simple docstring''' return 12
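A minimal sketch using the upstream transformers name that this mangled file mirrors (DetrConfig is the class registered as model_type "detr"); the constructor arguments shown are standard config fields from the signature above:

from transformers import DetrConfig

config = DetrConfig(num_queries=50, encoder_layers=4, decoder_layers=4)
print(config.num_attention_heads)  # attribute_map routes this to encoder_attention_heads
print(config.hidden_size)          # likewise mapped to d_model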
340
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder _lowerCAmelCase : Optional[int] = '''__DUMMY_TRANSFORMERS_USER__''' _lowerCAmelCase : Dict = '''Dummy User''' _lowerCAmelCase : Optional[int] = '''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''' _lowerCAmelCase : Tuple = '''https://hub-ci.huggingface.co''' _lowerCAmelCase : Any = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}''' _lowerCAmelCase : Tuple = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}''' _lowerCAmelCase : Dict = Path('''~/.huggingface/hub_ci_token''').expanduser() @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' monkeypatch.setattr("datasets.config.HF_ENDPOINT" , _lowerCamelCase ) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' HfFolder.save_token(_lowerCamelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="session" ) def lowerCamelCase_( ) -> str: '''simple docstring''' return HfApi(endpoint=_lowerCamelCase ) @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Dict = HfFolder.get_token() HfFolder.save_token(_lowerCamelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' def _cleanup_repo(_lowerCamelCase ): hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) return _cleanup_repo @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> List[str]: '''simple docstring''' @contextmanager def _temporary_repo(_lowerCamelCase ): try: yield repo_id finally: cleanup_repo(_lowerCamelCase ) return _temporary_repo @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Tuple = F"""repo_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[str] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : Dict = 
F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Any = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[Any] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
340
1
"""simple docstring""" import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> np.array: '''simple docstring''' _lowerCamelCase : List[str] = F"""{sampling_rate}""" _lowerCamelCase : Optional[Any] = "1" _lowerCamelCase : Union[str, Any] = "f32le" _lowerCamelCase : List[str] = [ "ffmpeg", "-i", "pipe:0", "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-hide_banner", "-loglevel", "quiet", "pipe:1", ] try: with subprocess.Popen(_lowerCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: _lowerCamelCase : List[Any] = ffmpeg_process.communicate(_lowerCamelCase ) except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error _lowerCamelCase : Union[str, Any] = output_stream[0] _lowerCamelCase : Optional[Any] = np.frombuffer(_lowerCamelCase , np.floataa ) if audio.shape[0] == 0: raise ValueError("Malformed soundfile" ) return audio def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "f32le" , ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Tuple = F"""{sampling_rate}""" _lowerCamelCase : int = "1" if format_for_conversion == "s16le": _lowerCamelCase : Dict = 2 elif format_for_conversion == "f32le": _lowerCamelCase : Union[str, Any] = 4 else: raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" ) _lowerCamelCase : List[Any] = platform.system() if system == "Linux": _lowerCamelCase : List[str] = "alsa" _lowerCamelCase : Optional[Any] = "default" elif system == "Darwin": _lowerCamelCase : List[Any] = "avfoundation" _lowerCamelCase : Tuple = ":0" elif system == "Windows": _lowerCamelCase : Optional[int] = "dshow" _lowerCamelCase : Dict = "default" _lowerCamelCase : Optional[int] = [ "ffmpeg", "-f", format_, "-i", input_, "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-fflags", "nobuffer", "-hide_banner", "-loglevel", "quiet", "pipe:1", ] _lowerCamelCase : Optional[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample _lowerCamelCase : List[Any] = _ffmpeg_stream(_lowerCamelCase , _lowerCamelCase ) for item in iterator: yield item def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "f32le" , ) -> Optional[int]: '''simple docstring''' if stream_chunk_s is not None: _lowerCamelCase : List[str] = stream_chunk_s else: _lowerCamelCase : Optional[Any] = chunk_length_s _lowerCamelCase : Any = ffmpeg_microphone(_lowerCamelCase , _lowerCamelCase , format_for_conversion=_lowerCamelCase ) if format_for_conversion == "s16le": _lowerCamelCase : str = np.intaa _lowerCamelCase : Tuple = 2 elif format_for_conversion == "f32le": _lowerCamelCase : List[str] = np.floataa _lowerCamelCase : Optional[int] = 4 else: raise ValueError(F"""Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`""" ) if stride_length_s is None: _lowerCamelCase : List[Any] = chunk_length_s / 6 _lowerCamelCase : str = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(_lowerCamelCase , (int, float) ): _lowerCamelCase : List[str] = [stride_length_s, stride_length_s] _lowerCamelCase : Optional[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample _lowerCamelCase : str = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample _lowerCamelCase : Optional[int] = datetime.datetime.now() _lowerCamelCase : Tuple = datetime.timedelta(seconds=_lowerCamelCase ) for item in chunk_bytes_iter(_lowerCamelCase , _lowerCamelCase , stride=(stride_left, stride_right) , stream=_lowerCamelCase ): # Put everything back in numpy scale _lowerCamelCase : List[Any] = np.frombuffer(item["raw"] , dtype=_lowerCamelCase ) _lowerCamelCase : Optional[Any] = ( item["stride"][0] // size_of_sample, item["stride"][1] // size_of_sample, ) _lowerCamelCase : str = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Any = B"" _lowerCamelCase, _lowerCamelCase : Union[str, Any] = stride if stride_left + stride_right >= chunk_len: raise ValueError( F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" ) _lowerCamelCase : Union[str, Any] = 0 for raw in iterator: acc += raw if stream and len(_lowerCamelCase ) < chunk_len: _lowerCamelCase : Tuple = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(_lowerCamelCase ) >= chunk_len: # We are flushing the accumulator _lowerCamelCase : str = (_stride_left, stride_right) _lowerCamelCase : Tuple = {"raw": acc[:chunk_len], "stride": stride} if stream: _lowerCamelCase : Dict = False yield item _lowerCamelCase : Optional[int] = stride_left _lowerCamelCase : Tuple = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(_lowerCamelCase ) > stride_left: _lowerCamelCase : int = {"raw": acc, "stride": (_stride_left, 0)} if stream: _lowerCamelCase : Any = False yield item def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : List[str] = 2**24 # 16Mo try: with subprocess.Popen(_lowerCamelCase , stdout=subprocess.PIPE , bufsize=_lowerCamelCase ) as ffmpeg_process: while True: _lowerCamelCase : str = ffmpeg_process.stdout.read(_lowerCamelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
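A hedged sketch of the file-decoding path these helpers implement, using the upstream transformers name ffmpeg_read for the first function above; "sample.wav" is a placeholder and ffmpeg must be on PATH:

from transformers.pipelines.audio_utils import ffmpeg_read

with open("sample.wav", "rb") as f:  # placeholder path
    audio = ffmpeg_read(f.read(), sampling_rate=16000)
print(audio.dtype, audio.shape)  # float32 mono PCM resampled to 16 kHz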
340
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) class A_ ( _a ): def __init__( self: List[Any] ,__lowerCAmelCase: Union[List[ControlNetModel], Tuple[ControlNetModel]] ): '''simple docstring''' super().__init__() _lowerCamelCase : Tuple = nn.ModuleList(__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Union[torch.Tensor, float, int] ,__lowerCAmelCase: torch.Tensor ,__lowerCAmelCase: List[torch.tensor] ,__lowerCAmelCase: List[float] ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[Dict[str, Any]] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(__lowerCAmelCase ,__lowerCAmelCase ,self.nets ) ): _lowerCamelCase, _lowerCamelCase : Union[str, Any] = controlnet( __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,) # merge samples if i == 0: _lowerCamelCase, _lowerCamelCase : Optional[Any] = down_samples, mid_sample else: _lowerCamelCase : Optional[int] = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__lowerCAmelCase ,__lowerCAmelCase ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Union[str, os.PathLike] ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Callable = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[str] = None ,): '''simple docstring''' _lowerCamelCase : List[Any] = 0 _lowerCamelCase : str = save_directory for controlnet in self.nets: controlnet.save_pretrained( __lowerCAmelCase ,is_main_process=__lowerCAmelCase ,save_function=__lowerCAmelCase ,safe_serialization=__lowerCAmelCase ,variant=__lowerCAmelCase ,) idx += 1 _lowerCamelCase : int = model_path_to_save + F"""_{idx}""" @classmethod def _lowercase ( cls: Any ,__lowerCAmelCase: Optional[Union[str, os.PathLike]] ,**__lowerCAmelCase: int ): '''simple docstring''' _lowerCamelCase : int = 0 _lowerCamelCase : str = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... _lowerCamelCase : Dict = pretrained_model_path while os.path.isdir(__lowerCAmelCase ): _lowerCamelCase : List[Any] = ControlNetModel.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase ) controlnets.append(__lowerCAmelCase ) idx += 1 _lowerCamelCase : Tuple = pretrained_model_path + F"""_{idx}""" logger.info(F"""{len(__lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.""" ) if len(__lowerCAmelCase ) == 0: raise ValueError( F"""No ControlNets found under {os.path.dirname(__lowerCAmelCase )}. Expected at least {pretrained_model_path + '_0'}.""" ) return cls(__lowerCAmelCase )
340
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCAmelCase : List[Any] = { '''configuration_mobilebert''': [ '''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileBertConfig''', '''MobileBertOnnxConfig''', ], '''tokenization_mobilebert''': ['''MobileBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Tuple = ['''MobileBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Tuple = [ '''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MobileBertForMaskedLM''', '''MobileBertForMultipleChoice''', '''MobileBertForNextSentencePrediction''', '''MobileBertForPreTraining''', '''MobileBertForQuestionAnswering''', '''MobileBertForSequenceClassification''', '''MobileBertForTokenClassification''', '''MobileBertLayer''', '''MobileBertModel''', '''MobileBertPreTrainedModel''', '''load_tf_weights_in_mobilebert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : int = [ '''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFMobileBertForMaskedLM''', '''TFMobileBertForMultipleChoice''', '''TFMobileBertForNextSentencePrediction''', '''TFMobileBertForPreTraining''', '''TFMobileBertForQuestionAnswering''', '''TFMobileBertForSequenceClassification''', '''TFMobileBertForTokenClassification''', '''TFMobileBertMainLayer''', '''TFMobileBertModel''', '''TFMobileBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mobilebert import ( MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertOnnxConfig, ) from .tokenization_mobilebert import MobileBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mobilebert_fast import MobileBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, ) else: import sys _lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : int = logging.get_logger(__name__) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]: '''simple docstring''' _lowerCamelCase : Tuple = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("head" ): _lowerCamelCase : Tuple = "segformer.encoder." + key if key.startswith("backbone" ): _lowerCamelCase : Any = key.replace("backbone" , "segformer.encoder" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _lowerCamelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )] _lowerCamelCase : int = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_lowerCamelCase )-1}""" ) if "norm" in key: _lowerCamelCase : Optional[Any] = key.replace("norm" , "layer_norm" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _lowerCamelCase : Dict = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )] _lowerCamelCase : Tuple = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_lowerCamelCase )-1}""" ) if "layer_norm1" in key: _lowerCamelCase : Union[str, Any] = key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: _lowerCamelCase : int = key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 _lowerCamelCase : Union[str, Any] = key[key.find("block" ) + len("block" )] _lowerCamelCase : Optional[Any] = key.replace(F"""block{idx}""" , F"""block.{int(_lowerCamelCase )-1}""" ) if "attn.q" in key: _lowerCamelCase : Optional[int] = key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: _lowerCamelCase : List[str] = key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: _lowerCamelCase : Tuple = key.replace("attn" , "attention.self" ) if "fc1" in key: _lowerCamelCase : Optional[Any] = key.replace("fc1" , "dense1" ) if "fc2" in key: _lowerCamelCase : Dict = key.replace("fc2" , "dense2" ) if "linear_pred" in key: _lowerCamelCase : int = key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: _lowerCamelCase : str = key.replace("linear_fuse.conv" , "linear_fuse" ) _lowerCamelCase : Optional[Any] = key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _lowerCamelCase : Union[str, Any] = key[key.find("linear_c" ) + len("linear_c" )] _lowerCamelCase : Optional[int] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_lowerCamelCase )-1}""" ) if key.startswith("head" ): _lowerCamelCase : List[str] = key.replace("head" , "classifier" ) _lowerCamelCase : Union[str, Any] = value return new_state_dict def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" ) _lowerCamelCase : Optional[Any] = 
state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict _lowerCamelCase : int = kv_weight[ : config.hidden_sizes[i], : ] _lowerCamelCase : int = kv_bias[: config.hidden_sizes[i]] _lowerCamelCase : Optional[int] = kv_weight[ config.hidden_sizes[i] :, : ] _lowerCamelCase : Optional[Any] = kv_bias[ config.hidden_sizes[i] : ] def lowerCamelCase_( ) -> Dict: '''simple docstring''' _lowerCamelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCamelCase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return image @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : Any = SegformerConfig() _lowerCamelCase : int = False # set attributes based on model_name _lowerCamelCase : Any = "huggingface/label-files" if "segformer" in model_name: _lowerCamelCase : str = model_name[len("segformer." ) : len("segformer." ) + 2] if "ade" in model_name: _lowerCamelCase : str = 150 _lowerCamelCase : Dict = "ade20k-id2label.json" _lowerCamelCase : Dict = (1, 150, 128, 128) elif "city" in model_name: _lowerCamelCase : List[str] = 19 _lowerCamelCase : Tuple = "cityscapes-id2label.json" _lowerCamelCase : Tuple = (1, 19, 128, 128) else: raise ValueError(F"""Model {model_name} not supported""" ) elif "mit" in model_name: _lowerCamelCase : List[str] = True _lowerCamelCase : Tuple = model_name[4:6] _lowerCamelCase : Tuple = 1000 _lowerCamelCase : List[Any] = "imagenet-1k-id2label.json" _lowerCamelCase : List[Any] = (1, 1000) else: raise ValueError(F"""Model {model_name} not supported""" ) # set config attributes _lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowerCamelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase : Optional[Any] = idalabel _lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : int = 256 elif size == "b2": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : List[Any] = 768 _lowerCamelCase : Any = [3, 4, 6, 3] elif size == "b3": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : Union[str, Any] = 768 _lowerCamelCase : Optional[Any] = [3, 4, 18, 3] elif size == "b4": _lowerCamelCase : str = [64, 128, 320, 512] _lowerCamelCase : Optional[Any] = 768 _lowerCamelCase : Dict = [3, 8, 27, 3] elif size == "b5": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : Tuple = 768 _lowerCamelCase : Tuple = [3, 6, 40, 3] else: raise ValueError(F"""Size {size} not supported""" ) # load image processor (only resize + normalize) _lowerCamelCase : Dict = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase ) # prepare image _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values logger.info(F"""Converting model {model_name}...""" ) # load original state dict if encoder_only: _lowerCamelCase : Tuple = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) ) else: _lowerCamelCase : int = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )["state_dict"] # rename keys _lowerCamelCase : str = rename_keys(_lowerCamelCase , 
encoder_only=_lowerCamelCase ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(_lowerCamelCase , _lowerCamelCase ) # create HuggingFace model and load state dict if encoder_only: _lowerCamelCase : Tuple = False _lowerCamelCase : Optional[int] = SegformerForImageClassification(_lowerCamelCase ) else: _lowerCamelCase : List[str] = SegformerForSemanticSegmentation(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) model.eval() # forward pass _lowerCamelCase : Any = model(_lowerCamelCase ) _lowerCamelCase : Dict = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": _lowerCamelCase : str = torch.tensor( [ [[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]], [[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]], [[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]], [[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]], [[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": _lowerCamelCase : int = torch.tensor( [ [[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]], [[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]], [[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": _lowerCamelCase : Optional[Any] = torch.tensor( [ [[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]], [[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]], [[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]], [[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]], [[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]], [[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]], 
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": _lowerCamelCase : Dict = torch.tensor( [ [[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]], [[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]], [[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": _lowerCamelCase : Optional[int] = torch.tensor( [ [[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]], [[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]], [[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [ [-1.13_72e01, -1.27_87e01, -1.34_77e01], [-1.25_36e01, -1.41_94e01, -1.44_09e01], [-1.32_17e01, -1.48_88e01, -1.53_27e01], ], [ [-1.47_91e01, -1.71_22e01, -1.82_77e01], [-1.71_63e01, -1.91_92e01, -1.95_33e01], [-1.78_97e01, -1.99_91e01, -2.03_15e01], ], [ [7.67_23e-01, 4.19_21e-01, -7.78_78e-02], [4.77_72e-01, 9.55_57e-03, -2.80_82e-01], [3.60_32e-01, -2.48_26e-01, -5.11_68e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": _lowerCamelCase : Union[str, Any] = torch.tensor( [ [[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]], [[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]], [[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": _lowerCamelCase : List[Any] = torch.tensor( [ [[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]], [[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]], [[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]], [[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]], [[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": _lowerCamelCase : Any = torch.tensor( [ [[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]], [[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]], [[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]], ] ) 
elif model_name == "segformer.b4.1024x1024.city.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]], [[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]], [[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": _lowerCamelCase : str = torch.tensor( [ [[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]], [[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]], [[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]], ] ) else: _lowerCamelCase : Dict = logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-2 ) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''segformer.b0.512x512.ade.160k''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowerCAmelCase : str = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
340
1
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) # TODO: upload to AWS _lowerCAmelCase : Union[str, Any] = { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json''' ), } class A_ ( _a ): lowerCAmelCase__ = 'retribert' def __init__( self: Dict ,__lowerCAmelCase: Dict=30_522 ,__lowerCAmelCase: Tuple=768 ,__lowerCAmelCase: Optional[int]=8 ,__lowerCAmelCase: Optional[Any]=12 ,__lowerCAmelCase: Any=3_072 ,__lowerCAmelCase: List[str]="gelu" ,__lowerCAmelCase: int=0.1 ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: str=512 ,__lowerCAmelCase: Optional[Any]=2 ,__lowerCAmelCase: Optional[int]=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Any=True ,__lowerCAmelCase: List[str]=128 ,__lowerCAmelCase: int=0 ,**__lowerCAmelCase: Any ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : Tuple = vocab_size _lowerCamelCase : Union[str, Any] = hidden_size _lowerCamelCase : Optional[Any] = num_hidden_layers _lowerCamelCase : Any = num_attention_heads _lowerCamelCase : List[str] = hidden_act _lowerCamelCase : Tuple = intermediate_size _lowerCamelCase : Optional[Any] = hidden_dropout_prob _lowerCamelCase : Dict = attention_probs_dropout_prob _lowerCamelCase : Any = max_position_embeddings _lowerCamelCase : List[str] = type_vocab_size _lowerCamelCase : Any = initializer_range _lowerCamelCase : str = layer_norm_eps _lowerCamelCase : Tuple = share_encoders _lowerCamelCase : Tuple = projection_dim
340
"""simple docstring""" _lowerCAmelCase : dict[tuple[int, int, int], int] = {} def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on _lowerCamelCase : Optional[int] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one _lowerCamelCase : int = _calculate(days - 1 , _lowerCamelCase , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 _lowerCamelCase : Tuple = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter _lowerCamelCase : str = _calculate(days - 1 , _lowerCamelCase , 0 ) _lowerCamelCase : List[Any] = state_late + state_absent + state_ontime _lowerCamelCase : int = prizestrings return prizestrings def lowerCamelCase_( _lowerCamelCase = 30 ) -> int: '''simple docstring''' return _calculate(_lowerCamelCase , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
340
1
"""simple docstring""" import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="attention" ) -> int: '''simple docstring''' _lowerCamelCase : Any = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] ) _lowerCamelCase : Optional[Any] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) _lowerCamelCase : Optional[Any] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] ) _lowerCamelCase : Tuple = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) _lowerCamelCase : Dict = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] ) _lowerCamelCase : Optional[Any] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) _lowerCamelCase : Dict = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] ) _lowerCamelCase : List[str] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Union[str, Any]: '''simple docstring''' if split_mlp_wi: _lowerCamelCase : Any = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :] _lowerCamelCase : Union[str, Any] = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :] _lowerCamelCase : Union[str, Any] = (wi_a, wi_a) else: _lowerCamelCase : Union[str, Any] = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :] _lowerCamelCase : Optional[int] = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :] return wi, wo def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i] def lowerCamelCase_( _lowerCamelCase , *, _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ) -> Dict: '''simple docstring''' _lowerCamelCase : Any = traverse_util.flatten_dict(variables["target"] ) _lowerCamelCase : List[Any] = {"/".join(_lowerCamelCase ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi _lowerCamelCase : List[Any] = "encoder/encoder/mlp/wi_0/kernel" in old print("Split MLP:" , _lowerCamelCase ) _lowerCamelCase : Optional[int] = collections.OrderedDict() # Shared embeddings. _lowerCamelCase : int = old["token_embedder/embedding"] # Encoder. for i in range(_lowerCamelCase ): # Block i, layer 0 (Self Attention). _lowerCamelCase : int = tax_layer_norm_lookup(_lowerCamelCase , _lowerCamelCase , "encoder" , "pre_attention_layer_norm" ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = tax_attention_lookup(_lowerCamelCase , _lowerCamelCase , "encoder" , "attention" ) _lowerCamelCase : List[str] = layer_norm _lowerCamelCase : List[str] = k.T _lowerCamelCase : Dict = o.T _lowerCamelCase : List[Any] = q.T _lowerCamelCase : Dict = v.T # Block i, layer 1 (MLP). 
_lowerCamelCase : Optional[int] = tax_layer_norm_lookup(_lowerCamelCase , _lowerCamelCase , "encoder" , "pre_mlp_layer_norm" ) _lowerCamelCase, _lowerCamelCase : Tuple = tax_mlp_lookup(_lowerCamelCase , _lowerCamelCase , "encoder" , _lowerCamelCase ) _lowerCamelCase : Optional[int] = layer_norm if split_mlp_wi: _lowerCamelCase : str = wi[0].T _lowerCamelCase : List[str] = wi[1].T else: _lowerCamelCase : Tuple = wi.T _lowerCamelCase : Optional[Any] = wo.T if scalable_attention: # convert the rel_embedding of each layer _lowerCamelCase : Optional[int] = tax_relpos_bias_lookup( _lowerCamelCase , _lowerCamelCase , "encoder" ).T _lowerCamelCase : Tuple = old["encoder/encoder_norm/scale"] if not scalable_attention: _lowerCamelCase : Dict = tax_relpos_bias_lookup( _lowerCamelCase , 0 , "encoder" ).T _lowerCamelCase : Optional[Any] = tax_relpos_bias_lookup( _lowerCamelCase , 0 , "decoder" ).T if not is_encoder_only: # Decoder. for i in range(_lowerCamelCase ): # Block i, layer 0 (Self Attention). _lowerCamelCase : str = tax_layer_norm_lookup(_lowerCamelCase , _lowerCamelCase , "decoder" , "pre_self_attention_layer_norm" ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = tax_attention_lookup(_lowerCamelCase , _lowerCamelCase , "decoder" , "self_attention" ) _lowerCamelCase : Any = layer_norm _lowerCamelCase : List[str] = k.T _lowerCamelCase : str = o.T _lowerCamelCase : Any = q.T _lowerCamelCase : int = v.T # Block i, layer 1 (Cross Attention). _lowerCamelCase : List[str] = tax_layer_norm_lookup(_lowerCamelCase , _lowerCamelCase , "decoder" , "pre_cross_attention_layer_norm" ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = tax_attention_lookup(_lowerCamelCase , _lowerCamelCase , "decoder" , "encoder_decoder_attention" ) _lowerCamelCase : int = layer_norm _lowerCamelCase : Tuple = k.T _lowerCamelCase : Optional[Any] = o.T _lowerCamelCase : Tuple = q.T _lowerCamelCase : Tuple = v.T # Block i, layer 2 (MLP). _lowerCamelCase : List[Any] = tax_layer_norm_lookup(_lowerCamelCase , _lowerCamelCase , "decoder" , "pre_mlp_layer_norm" ) _lowerCamelCase, _lowerCamelCase : int = tax_mlp_lookup(_lowerCamelCase , _lowerCamelCase , "decoder" , _lowerCamelCase ) _lowerCamelCase : Tuple = layer_norm if split_mlp_wi: _lowerCamelCase : str = wi[0].T _lowerCamelCase : Optional[Any] = wi[1].T else: _lowerCamelCase : List[str] = wi.T _lowerCamelCase : int = wo.T if scalable_attention: # convert the rel_embedding of each layer _lowerCamelCase : Dict = tax_relpos_bias_lookup(_lowerCamelCase , _lowerCamelCase , "decoder" ).T _lowerCamelCase : List[str] = old["decoder/decoder_norm/scale"] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: _lowerCamelCase : str = old["decoder/logits_dense/kernel"].T return new def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: _lowerCamelCase : Tuple = state_dict["shared.weight"] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: _lowerCamelCase : Optional[Any] = state_dict["shared.weight"] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("Using shared word embeddings as lm_head." 
) _lowerCamelCase : Any = state_dict["shared.weight"] return state_dict def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : int = checkpoints.load_tax_checkpoint(_lowerCamelCase ) _lowerCamelCase : Optional[int] = convert_tax_to_pytorch( _lowerCamelCase , num_layers=config.num_layers , is_encoder_only=_lowerCamelCase , scalable_attention=_lowerCamelCase ) _lowerCamelCase : int = make_state_dict(_lowerCamelCase , _lowerCamelCase ) model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = False , ) -> int: '''simple docstring''' _lowerCamelCase : List[str] = MTaConfig.from_json_file(_lowerCamelCase ) print(F"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: _lowerCamelCase : Optional[int] = UMTaEncoderModel(_lowerCamelCase ) else: _lowerCamelCase : Optional[Any] = UMTaForConditionalGeneration(_lowerCamelCase ) # Load weights from tf checkpoint load_tax_weights_in_ta(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(_lowerCamelCase ) # Verify that we can load the checkpoint. model.from_pretrained(_lowerCamelCase ) print("Done" ) if __name__ == "__main__": _lowerCAmelCase : Dict = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) parser.add_argument( '''--scalable_attention''', action='''store_true''', help='''Whether the model uses scaled attention (umt5 model)''', default=False, ) _lowerCAmelCase : List[str] = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
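# Toy illustration of the reshape the attention-lookup helper above performs: a
# T5X kernel of shape (d_model, num_heads, head_dim) is flattened into the
# (d_model, num_heads * head_dim) matrix PyTorch expects. (Shapes are made up.)
def _sketch_head_merge():
    import numpy as np

    d_model, num_heads, head_dim = 6, 2, 3
    k = np.arange(d_model * num_heads * head_dim).reshape(d_model, num_heads, head_dim)
    merged = np.ascontiguousarray(k).reshape(k.shape[0], k.shape[1] * k.shape[2])
    assert merged.shape == (d_model, num_heads * head_dim)
    return merged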
340
"""simple docstring""" from __future__ import annotations def lowerCamelCase_( _lowerCamelCase ) -> bool: '''simple docstring''' _lowerCamelCase : int = str(_lowerCamelCase ) return len(_lowerCamelCase ) == 9 and set(_lowerCamelCase ) == set("123456789" ) def lowerCamelCase_( ) -> int | None: '''simple docstring''' for base_num in range(9999 , 4999 , -1 ): _lowerCamelCase : Union[str, Any] = 100002 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate for base_num in range(333 , 99 , -1 ): _lowerCamelCase : Tuple = 1002003 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate return None if __name__ == "__main__": print(f'''{solution() = }''')
340
1
"""simple docstring""" from __future__ import annotations from cmath import sqrt def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> tuple[complex, complex]: '''simple docstring''' if a == 0: raise ValueError("Coefficient 'a' must not be zero." ) _lowerCamelCase : Union[str, Any] = b * b - 4 * a * c _lowerCamelCase : Optional[int] = (-b + sqrt(_lowerCamelCase )) / (2 * a) _lowerCamelCase : List[Any] = (-b - sqrt(_lowerCamelCase )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def lowerCamelCase_( ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase, _lowerCamelCase : List[str] = quadratic_roots(a=5 , b=6 , c=1 ) print(F"""The solutions are: {solutiona} and {solutiona}""" ) if __name__ == "__main__": main()
340
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class A_ ( _a ): lowerCAmelCase__ = 'char' lowerCAmelCase__ = 'bpe' lowerCAmelCase__ = 'wp' _lowerCAmelCase : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class A_ ( _a ): lowerCAmelCase__ = ['image_processor', 'char_tokenizer'] lowerCAmelCase__ = 'ViTImageProcessor' lowerCAmelCase__ = 'MgpstrTokenizer' def __init__( self: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Optional[int]=None ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Any = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." ,__lowerCAmelCase ,) _lowerCamelCase : List[Any] = kwargs.pop("feature_extractor" ) _lowerCamelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) _lowerCamelCase : List[str] = tokenizer _lowerCamelCase : str = AutoTokenizer.from_pretrained("gpt2" ) _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(__lowerCAmelCase ,__lowerCAmelCase ) def __call__( self: Optional[int] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[Any]=None ,**__lowerCAmelCase: Tuple ): '''simple docstring''' if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." 
) if images is not None: _lowerCamelCase : Optional[int] = self.image_processor(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is not None: _lowerCamelCase : int = self.char_tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is None: return inputs elif images is None: return encodings else: _lowerCamelCase : Tuple = encodings["input_ids"] return inputs def _lowercase ( self: int ,__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = sequences _lowerCamelCase : Dict = char_preds.size(0 ) _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._decode_helper(__lowerCAmelCase ,"char" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = self._decode_helper(__lowerCAmelCase ,"bpe" ) _lowerCamelCase, _lowerCamelCase : Tuple = self._decode_helper(__lowerCAmelCase ,"wp" ) _lowerCamelCase : List[str] = [] _lowerCamelCase : str = [] for i in range(__lowerCAmelCase ): _lowerCamelCase : str = [char_scores[i], bpe_scores[i], wp_scores[i]] _lowerCamelCase : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] _lowerCamelCase : Optional[Any] = scores.index(max(__lowerCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _lowerCamelCase : Tuple = {} _lowerCamelCase : Tuple = final_strs _lowerCamelCase : int = final_scores _lowerCamelCase : str = char_strs _lowerCamelCase : Dict = bpe_strs _lowerCamelCase : int = wp_strs return out def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ): '''simple docstring''' if format == DecodeType.CHARACTER: _lowerCamelCase : int = self.char_decode _lowerCamelCase : List[str] = 1 _lowerCamelCase : Optional[int] = "[s]" elif format == DecodeType.BPE: _lowerCamelCase : Dict = self.bpe_decode _lowerCamelCase : str = 2 _lowerCamelCase : Union[str, Any] = "#" elif format == DecodeType.WORDPIECE: _lowerCamelCase : int = self.wp_decode _lowerCamelCase : List[str] = 102 _lowerCamelCase : List[Any] = "[SEP]" else: raise ValueError(F"""Format {format} is not supported.""" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = [], [] _lowerCamelCase : Any = pred_logits.size(0 ) _lowerCamelCase : int = pred_logits.size(1 ) _lowerCamelCase, _lowerCamelCase : List[Any] = pred_logits.topk(1 ,dim=-1 ,largest=__lowerCAmelCase ,sorted=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_index.view(-1 ,__lowerCAmelCase )[:, 1:] _lowerCamelCase : List[str] = decoder(__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : str = torch.nn.functional.softmax(__lowerCAmelCase ,dim=2 ).max(dim=2 ) _lowerCamelCase : Any = preds_max_prob[:, 1:] for index in range(__lowerCAmelCase ): _lowerCamelCase : List[Any] = preds_str[index].find(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_str[index][:pred_eos] _lowerCamelCase : Optional[Any] = preds_index[index].cpu().tolist() _lowerCamelCase : List[str] = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1 _lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1] _lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__lowerCAmelCase ) conf_scores.append(__lowerCAmelCase ) return dec_strs, conf_scores def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : str = [seq.replace(" " ,"" ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs def _lowercase ( 
self: List[str] ,__lowerCAmelCase: List[str] ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(__lowerCAmelCase ) def _lowercase ( self: Tuple ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = [seq.replace(" " ,"" ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs
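# Small sketch of the best-of-three vote the decode logic above performs for each
# sample: whichever decoding head (char / BPE / wordpiece) yields the highest
# cumulative-probability score wins. (The scores and strings are invented.)
_head_scores = [0.91, 0.87, 0.95]  # char, bpe, wp confidences for one sample
_head_strs = ["meet", "meat", "meet"]
_best = _head_scores.index(max(_head_scores))
assert _head_strs[_best] == "meet"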
340
1
"""simple docstring""" import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) _lowerCAmelCase : Optional[Any] = { '''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''', '''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''', '''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''', '''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''', '''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''', '''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''', '''mask_downscaling.0''': '''mask_embed.conv1''', '''mask_downscaling.1''': '''mask_embed.layer_norm1''', '''mask_downscaling.3''': '''mask_embed.conv2''', '''mask_downscaling.4''': '''mask_embed.layer_norm2''', '''mask_downscaling.6''': '''mask_embed.conv3''', '''point_embeddings''': '''point_embed''', '''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''', '''image_encoder''': '''vision_encoder''', '''neck.0''': '''neck.conv1''', '''neck.1''': '''neck.layer_norm1''', '''neck.2''': '''neck.conv2''', '''neck.3''': '''neck.layer_norm2''', '''patch_embed.proj''': '''patch_embed.projection''', '''.norm''': '''.layer_norm''', '''blocks''': '''layers''', } def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Dict = {} state_dict.pop("pixel_mean" , _lowerCamelCase ) state_dict.pop("pixel_std" , _lowerCamelCase ) _lowerCamelCase : Optional[Any] = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _lowerCamelCase : Tuple = key.replace(_lowerCamelCase , _lowerCamelCase ) if re.match(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = int(re.match(_lowerCamelCase , _lowerCamelCase ).group(2 ) ) if layer_nb == 0: _lowerCamelCase : Any = key.replace("layers.0" , "proj_in" ) elif layer_nb == 1: _lowerCamelCase : Optional[Any] = key.replace("layers.1" , "layers.0" ) elif layer_nb == 2: _lowerCamelCase : Dict = key.replace("layers.2" , "proj_out" ) _lowerCamelCase : int = value _lowerCamelCase : str = model_state_dict[ "prompt_encoder.shared_embedding.positional_embedding" ] return model_state_dict def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="ybelkada/segment-anything" ) -> int: '''simple docstring''' _lowerCamelCase : Tuple = hf_hub_download(_lowerCamelCase , F"""checkpoints/{model_name}.pth""" ) if "sam_vit_b" in model_name: _lowerCamelCase : Optional[int] = SamConfig() elif "sam_vit_l" in model_name: _lowerCamelCase : Optional[int] = SamVisionConfig( hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) _lowerCamelCase : Tuple = SamConfig( vision_config=_lowerCamelCase , ) elif "sam_vit_h" in model_name: _lowerCamelCase : Tuple = SamVisionConfig( hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) _lowerCamelCase : Union[str, Any] = SamConfig( vision_config=_lowerCamelCase , ) _lowerCamelCase : Any = torch.load(_lowerCamelCase , map_location="cpu" ) _lowerCamelCase : int = replace_keys(_lowerCamelCase ) _lowerCamelCase : Optional[int] = SamImageProcessor() _lowerCamelCase : str = 
SamProcessor(image_processor=_lowerCamelCase ) _lowerCamelCase : List[str] = SamModel(_lowerCamelCase ) hf_model.load_state_dict(_lowerCamelCase ) _lowerCamelCase : List[Any] = hf_model.to("cuda" ) _lowerCamelCase : Union[str, Any] = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" _lowerCamelCase : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("RGB" ) _lowerCamelCase : Optional[Any] = [[[400, 650]]] _lowerCamelCase : Optional[Any] = [[1]] _lowerCamelCase : Union[str, Any] = processor(images=np.array(_lowerCamelCase ) , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _lowerCamelCase : List[Any] = hf_model(**_lowerCamelCase ) _lowerCamelCase : List[str] = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8 _lowerCamelCase : List[Any] = processor( images=np.array(_lowerCamelCase ) , input_points=_lowerCamelCase , input_labels=_lowerCamelCase , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _lowerCamelCase : Union[str, Any] = hf_model(**_lowerCamelCase ) _lowerCamelCase : List[str] = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4 _lowerCamelCase : str = ((75, 275, 1725, 850),) _lowerCamelCase : int = processor(images=np.array(_lowerCamelCase ) , input_boxes=_lowerCamelCase , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _lowerCamelCase : Any = hf_model(**_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = output.iou_scores.squeeze() assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4 # Test with 2 points and 1 image. _lowerCamelCase : Union[str, Any] = [[[400, 650], [800, 650]]] _lowerCamelCase : str = [[1, 1]] _lowerCamelCase : List[Any] = processor( images=np.array(_lowerCamelCase ) , input_points=_lowerCamelCase , input_labels=_lowerCamelCase , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _lowerCamelCase : Dict = hf_model(**_lowerCamelCase ) _lowerCamelCase : Tuple = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2 if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() _lowerCAmelCase : Union[str, Any] = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195'''] parser.add_argument( '''--model_name''', default='''sam_vit_h_4b8939''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) parser.add_argument( '''--model_hub_id''', default='''ybelkada/segment-anything''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) _lowerCAmelCase : Optional[Any] = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
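# Isolated check of the hypernetwork-MLP renumbering regex used by the
# key-replacement logic above: layer index 2 inside `output_hypernetworks_mlps`
# is the one that gets renamed to `proj_out`. (The key string is a toy example.)
import re as _re

_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
_m = _re.match(_pattern, "mask_decoder.output_hypernetworks_mlps.0.layers.2.weight")
assert _m is not None and int(_m.group(2)) == 2  # "layers.2" -> "proj_out"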
340
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version _lowerCAmelCase : List[Any] = get_logger(__name__) class A_ : lowerCAmelCase__ = 'dummy_data' lowerCAmelCase__ = 'datasets' lowerCAmelCase__ = False def __init__( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[Version, str] ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[List[Callable]] = None ,): '''simple docstring''' _lowerCamelCase : str = 0 _lowerCamelCase : List[str] = dataset_name _lowerCamelCase : Optional[int] = cache_dir _lowerCamelCase : Optional[int] = use_local_dummy_data _lowerCamelCase : int = config # download_callbacks take a single url as input _lowerCamelCase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _lowerCamelCase : int = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _lowerCamelCase : Tuple = str(__lowerCAmelCase ) # to be downloaded _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Dict = None @property def _lowercase ( self: str ): '''simple docstring''' if self._dummy_file is None: _lowerCamelCase : List[str] = self.download_dummy_data() return self._dummy_file @property def _lowercase ( self: str ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join("dummy" ,self.version_name ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' return os.path.join(self.dummy_data_folder ,"dummy_data.zip" ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Dict = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _lowerCamelCase : Optional[int] = cached_path( __lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=__lowerCAmelCase ,force_extract=__lowerCAmelCase ) return os.path.join(__lowerCAmelCase ,self.dummy_file_name ) @property def _lowercase ( self: Tuple ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def _lowercase ( self: List[str] ): '''simple docstring''' if self._bucket_url is None: _lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) ) return self._bucket_url @property def _lowercase ( self: Union[str, Any] ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _lowerCamelCase : Tuple = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _lowerCamelCase : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase ) elif isinstance(__lowerCAmelCase ,(list, tuple) ): return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase ) else: return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' return path def _lowercase ( self: Optional[int] ): '''simple docstring''' return {} def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : str = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): for single_url in single_urls: download_callback(__lowerCAmelCase ) else: _lowerCamelCase : Union[str, Any] = single_urls download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) for x in single_urls] else: _lowerCamelCase : Union[str, Any] = single_urls _lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) _lowerCamelCase : List[Any] = value # make sure that values are unique if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Dict = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _lowerCamelCase : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,__lowerCAmelCase ) ) for url in data_url ) _lowerCamelCase : Optional[Any] = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _lowerCamelCase : Tuple = [data_url[0]] * len(__lowerCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the 
url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__lowerCAmelCase ) return dummy_data_list def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__lowerCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _lowercase ( self: Optional[Any] ): '''simple docstring''' pass def _lowercase ( self: Optional[int] ): '''simple docstring''' pass def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' def _iter_archive_members(__lowerCAmelCase: Any ): # this preserves the order of the members inside the ZIP archive _lowerCamelCase : Tuple = Path(self.dummy_file ).parent _lowerCamelCase : str = path.relative_to(__lowerCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _lowerCamelCase : Optional[int] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = Path(__lowerCAmelCase ) _lowerCamelCase : int = _iter_archive_members(__lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__lowerCAmelCase ).as_posix(), file_path.open("rb" ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : List[Any] = [paths] for path in paths: if os.path.isfile(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__lowerCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
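# Sketch of the URL-to-dummy-filename convention the helpers above rely on: the
# last path component is percent-encoded with quote_plus so query strings survive
# as legal filenames. (The URL is a made-up example.)
import os as _os
import urllib.parse as _up
from pathlib import Path as _Path

_url = "https://example.com/data/train.csv?version=2"
_dummy_name = _os.path.join("dummy_data", _up.quote_plus(_Path(_url).name))
assert _dummy_name.endswith("train.csv%3Fversion%3D2")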
340
1
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A_ ( _a , unittest.TestCase ): lowerCAmelCase__ = LDMTextToImagePipeline lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS - { 'negative_prompt', 'negative_prompt_embeds', 'cross_attention_kwargs', 'prompt_embeds', } lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - { 'num_images_per_prompt', 'callback', 'callback_steps', } lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase__ = False def _lowercase ( self: Optional[Any] ): '''simple docstring''' torch.manual_seed(0 ) _lowerCamelCase : Optional[Any] = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,) _lowerCamelCase : Any = DDIMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule="scaled_linear" ,clip_sample=__lowerCAmelCase ,set_alpha_to_one=__lowerCAmelCase ,) torch.manual_seed(0 ) _lowerCamelCase : Dict = AutoencoderKL( block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") ,up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") ,latent_channels=4 ,) torch.manual_seed(0 ) _lowerCamelCase : Dict = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,) _lowerCamelCase : Union[str, Any] = CLIPTextModel(__lowerCAmelCase ) _lowerCamelCase : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _lowerCamelCase : str = { "unet": unet, "scheduler": scheduler, "vqvae": vae, "bert": text_encoder, "tokenizer": tokenizer, } return components def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Union[str, Any]=0 ): '''simple docstring''' if str(__lowerCAmelCase ).startswith("mps" ): _lowerCamelCase : Optional[Any] = torch.manual_seed(__lowerCAmelCase ) else: _lowerCamelCase : List[str] = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _lowerCamelCase : Any = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator _lowerCamelCase : str = self.get_dummy_components() _lowerCamelCase : Optional[int] = LDMTextToImagePipeline(**__lowerCAmelCase ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : str = self.get_dummy_inputs(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = pipe(**__lowerCAmelCase ).images _lowerCamelCase : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) _lowerCamelCase : str = np.array([0.61_01, 0.61_56, 0.56_22, 0.48_95, 0.66_61, 0.38_04, 0.57_48, 0.61_36, 0.50_14] ) 
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class A_ ( unittest.TestCase ): def _lowercase ( self: Union[str, Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self: Any ,__lowerCAmelCase: str ,__lowerCAmelCase: Any=torch.floataa ,__lowerCAmelCase: str=0 ): '''simple docstring''' _lowerCamelCase : str = torch.manual_seed(__lowerCAmelCase ) _lowerCamelCase : str = np.random.RandomState(__lowerCAmelCase ).standard_normal((1, 4, 32, 32) ) _lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase ,dtype=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : str = self.get_inputs(__lowerCAmelCase ) _lowerCamelCase : Any = pipe(**__lowerCAmelCase ).images _lowerCamelCase : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) _lowerCamelCase : Union[str, Any] = np.array([0.5_18_25, 0.5_28_50, 0.5_25_43, 0.5_42_58, 0.5_23_04, 0.5_25_69, 0.5_43_63, 0.5_52_76, 0.5_68_78] ) _lowerCamelCase : Any = np.abs(expected_slice - image_slice ).max() assert max_diff < 1e-3 @nightly @require_torch_gpu class A_ ( unittest.TestCase ): def _lowercase ( self: int ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self: Dict ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Dict=torch.floataa ,__lowerCAmelCase: List[str]=0 ): '''simple docstring''' _lowerCamelCase : int = torch.manual_seed(__lowerCAmelCase ) _lowerCamelCase : Dict = np.random.RandomState(__lowerCAmelCase ).standard_normal((1, 4, 32, 32) ) _lowerCamelCase : Any = torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase ,dtype=__lowerCAmelCase ) _lowerCamelCase : Any = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Dict = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Any = self.get_inputs(__lowerCAmelCase ) _lowerCamelCase : Dict = pipe(**__lowerCAmelCase ).images[0] _lowerCamelCase : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" ) _lowerCamelCase : int = np.abs(expected_image - image ).max() assert max_diff < 1e-3
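# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the test suite above): invoking the same
# pipeline end to end. The checkpoint id comes from the slow test; device
# selection and the output filename are illustrative assumptions.
import torch
from diffusers import LDMTextToImagePipeline

ldm_pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
ldm_pipe = ldm_pipe.to("cuda" if torch.cuda.is_available() else "cpu")
gen = torch.Generator(device=ldm_pipe.device).manual_seed(0)
out = ldm_pipe(
    "A painting of a squirrel eating a burger",
    generator=gen,
    num_inference_steps=50,
    guidance_scale=6.0,
)
out.images[0].save("ldm_squirrel.png")
# ---------------------------------------------------------------------------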
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise TypeError("Undefined for non-integers" ) elif precision < 1: raise ValueError("Undefined for non-natural numbers" ) _lowerCamelCase : int = precision _lowerCamelCase : Dict = ceil(precision / 14 ) _lowerCamelCase : Optional[Any] = 426880 * Decimal(10005 ).sqrt() _lowerCamelCase : int = 1 _lowerCamelCase : Optional[int] = 13591409 _lowerCamelCase : int = Decimal(_lowerCamelCase ) for k in range(1 , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowerCamelCase ) ** 3) linear_term += 545140134 exponential_term *= -262537412640768000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = 50 print(f'''The first {n} digits of pi is: {pi(n)}''')
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A_ ( _a ): lowerCAmelCase__ = (DPMSolverSinglestepScheduler,) lowerCAmelCase__ = (('num_inference_steps', 2_5),) def _lowercase ( self: Optional[Any] ,**__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = { "num_train_timesteps": 1_000, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "prediction_type": "epsilon", "thresholding": False, "sample_max_value": 1.0, "algorithm_type": "dpmsolver++", "solver_type": "midpoint", "lambda_min_clipped": -float("inf" ), "variance_type": None, } config.update(**__lowerCAmelCase ) return config def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: int=0 ,**__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = dict(self.forward_default_kwargs ) _lowerCamelCase : List[Any] = kwargs.pop("num_inference_steps" ,__lowerCAmelCase ) _lowerCamelCase : Dict = self.dummy_sample _lowerCamelCase : Union[str, Any] = 0.1 * sample _lowerCamelCase : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _lowerCamelCase : str = self.get_scheduler_config(**__lowerCAmelCase ) _lowerCamelCase : int = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals _lowerCamelCase : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) _lowerCamelCase : List[Any] = scheduler_class.from_pretrained(__lowerCAmelCase ) new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals _lowerCamelCase : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order] _lowerCamelCase, _lowerCamelCase : Any = sample, sample for t in range(__lowerCAmelCase ,time_step + scheduler.config.solver_order + 1 ): _lowerCamelCase : int = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,**__lowerCAmelCase ).prev_sample _lowerCamelCase : str = new_scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,**__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _lowercase ( self: str ): '''simple docstring''' pass def _lowercase ( self: int ,__lowerCAmelCase: Any=0 ,**__lowerCAmelCase: int ): '''simple docstring''' _lowerCamelCase : Any = dict(self.forward_default_kwargs ) _lowerCamelCase : Tuple = kwargs.pop("num_inference_steps" ,__lowerCAmelCase ) _lowerCamelCase : Optional[int] = self.dummy_sample _lowerCamelCase : str = 0.1 * sample _lowerCamelCase : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _lowerCamelCase : int = self.get_scheduler_config() _lowerCamelCase : List[str] = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) _lowerCamelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) _lowerCamelCase : str = scheduler_class.from_pretrained(__lowerCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past 
residual (must be after setting timesteps) _lowerCamelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order] _lowerCamelCase : str = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,**__lowerCAmelCase ).prev_sample _lowerCamelCase : Tuple = new_scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,**__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _lowercase ( self: List[Any] ,__lowerCAmelCase: int=None ,**__lowerCAmelCase: Any ): '''simple docstring''' if scheduler is None: _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCamelCase : Dict = self.get_scheduler_config(**__lowerCAmelCase ) _lowerCamelCase : Optional[int] = scheduler_class(**__lowerCAmelCase ) _lowerCamelCase : Tuple = self.scheduler_classes[0] _lowerCamelCase : Union[str, Any] = self.get_scheduler_config(**__lowerCAmelCase ) _lowerCamelCase : List[Any] = scheduler_class(**__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = 10 _lowerCamelCase : List[Any] = self.dummy_model() _lowerCamelCase : Dict = self.dummy_sample_deter scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _lowerCamelCase : Optional[int] = model(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Any = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample return sample def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Any = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _lowerCamelCase : List[str] = 50 _lowerCamelCase : List[str] = self.dummy_model() _lowerCamelCase : Optional[int] = self.dummy_sample_deter scheduler.set_timesteps(__lowerCAmelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _lowerCamelCase : List[Any] = model(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : str = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample _lowerCamelCase : Dict = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.25_74 ) < 1e-3 def _lowercase ( self: Tuple ): '''simple docstring''' for timesteps in [25, 50, 100, 999, 1_000]: self.check_over_configs(num_train_timesteps=__lowerCAmelCase ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : str = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _lowerCamelCase : Tuple = self.full_loop(scheduler=__lowerCAmelCase ) _lowerCamelCase : str = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.27_91 ) < 1e-3 _lowerCamelCase : Dict = DEISMultistepScheduler.from_config(scheduler.config ) _lowerCamelCase : Optional[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config ) _lowerCamelCase : Dict = UniPCMultistepScheduler.from_config(scheduler.config ) _lowerCamelCase : Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _lowerCamelCase : int = self.full_loop(scheduler=__lowerCAmelCase ) _lowerCamelCase : List[str] = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.27_91 ) < 1e-3 def _lowercase ( self: int ): '''simple docstring''' self.check_over_configs(thresholding=__lowerCAmelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__lowerCAmelCase ,prediction_type=__lowerCAmelCase ,sample_max_value=__lowerCAmelCase 
,algorithm_type="dpmsolver++" ,solver_order=__lowerCAmelCase ,solver_type=__lowerCAmelCase ,) def _lowercase ( self: Optional[int] ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCAmelCase ) def _lowercase ( self: Any ): '''simple docstring''' for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__lowerCAmelCase ,solver_type=__lowerCAmelCase ,prediction_type=__lowerCAmelCase ,algorithm_type=__lowerCAmelCase ,) _lowerCamelCase : Union[str, Any] = self.full_loop( solver_order=__lowerCAmelCase ,solver_type=__lowerCAmelCase ,prediction_type=__lowerCAmelCase ,algorithm_type=__lowerCAmelCase ,) assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers" def _lowercase ( self: str ): '''simple docstring''' self.check_over_configs(lower_order_final=__lowerCAmelCase ) self.check_over_configs(lower_order_final=__lowerCAmelCase ) def _lowercase ( self: int ): '''simple docstring''' self.check_over_configs(lambda_min_clipped=-float("inf" ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def _lowercase ( self: Any ): '''simple docstring''' self.check_over_configs(variance_type=__lowerCAmelCase ) self.check_over_configs(variance_type="learned_range" ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]: self.check_over_forward(num_inference_steps=__lowerCAmelCase ,time_step=0 ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Dict = self.full_loop() _lowerCamelCase : List[str] = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.27_91 ) < 1e-3 def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self.full_loop(use_karras_sigmas=__lowerCAmelCase ) _lowerCamelCase : str = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.22_48 ) < 1e-3 def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Dict = self.full_loop(prediction_type="v_prediction" ) _lowerCamelCase : Dict = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.14_53 ) < 1e-3 def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Optional[int] = self.full_loop(prediction_type="v_prediction" ,use_karras_sigmas=__lowerCAmelCase ) _lowerCamelCase : Tuple = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.06_49 ) < 1e-3 def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : List[str] = self.scheduler_classes[0] _lowerCamelCase : List[str] = self.get_scheduler_config(thresholding=__lowerCAmelCase ,dynamic_thresholding_ratio=0 ) _lowerCamelCase : Union[str, Any] = scheduler_class(**__lowerCAmelCase ) _lowerCamelCase : List[Any] = 10 _lowerCamelCase : Tuple = self.dummy_model() _lowerCamelCase : int = self.dummy_sample_deter.half() scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _lowerCamelCase : int = model(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample assert sample.dtype == torch.floataa
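# Hedged sketch of the config round-trip the test above exercises: each of the
# multistep solvers can be rebuilt from another scheduler's config. The
# constructor arguments and step count are illustrative assumptions.
from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

base = DPMSolverSinglestepScheduler(num_train_timesteps=1000, solver_order=2)
for cls in (DEISMultistepScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler):
    swapped = cls.from_config(base.config)
    swapped.set_timesteps(25)
    print(type(swapped).__name__, len(swapped.timesteps))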
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class A_ ( _a ): lowerCAmelCase__ = 42 lowerCAmelCase__ = None def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=0.9_9_9 , _lowerCamelCase="cosine" , ) -> List[str]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(_lowerCamelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_lowerCamelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) _lowerCamelCase : str = [] for i in range(_lowerCamelCase ): _lowerCamelCase : Any = i / num_diffusion_timesteps _lowerCamelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) ) return torch.tensor(_lowerCamelCase , dtype=torch.floataa ) class A_ ( _a , _a ): @register_to_config def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,): '''simple docstring''' if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) _lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = 1.0 - self.betas _lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 ) _lowerCamelCase : int = torch.tensor(1.0 ) # standard deviation of the initial noise distribution _lowerCamelCase : Tuple = 1.0 # setable values _lowerCamelCase : List[Any] = None _lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() ) _lowerCamelCase : List[str] = variance_type def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ): '''simple docstring''' return sample def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ): '''simple docstring''' _lowerCamelCase : str = num_inference_steps _lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) _lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) _lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ): '''simple docstring''' if prev_timestep is None: _lowerCamelCase : List[str] = t - 1 _lowerCamelCase : Optional[int] = self.alphas_cumprod[t] _lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : List[Any] = self.betas[t] else: _lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from 
https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample _lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: _lowerCamelCase : List[str] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": _lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) ) _lowerCamelCase : str = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler _lowerCamelCase : str = variance.log() _lowerCamelCase : str = beta.log() _lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2 _lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,): '''simple docstring''' _lowerCamelCase : str = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": _lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 ) else: _lowerCamelCase : List[Any] = None # 1. compute alphas, betas if prev_timestep is None: _lowerCamelCase : List[Any] = t - 1 _lowerCamelCase : Dict = self.alphas_cumprod[t] _lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : List[str] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : Any = self.betas[t] _lowerCamelCase : str = self.alphas[t] else: _lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev _lowerCamelCase : Optional[Any] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": _lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": _lowerCamelCase : List[Any] = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: _lowerCamelCase : Any = torch.clamp( __lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t _lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _lowerCamelCase : Union[str, Any] = 0 if t > 0: _lowerCamelCase : Dict = randn_tensor( model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device ) _lowerCamelCase : Any = self._get_variance( __lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,) if self.variance_type == "fixed_small_log": _lowerCamelCase : Optional[Any] = variance elif self.variance_type == "learned_range": _lowerCamelCase : Optional[int] = (0.5 * variance).exp() else: raise ValueError( F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" " for the UnCLIPScheduler." ) _lowerCamelCase : Dict = variance * variance_noise _lowerCamelCase : List[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.IntTensor ,): '''simple docstring''' _lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype ) _lowerCamelCase : Any = timesteps.to(original_samples.device ) _lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5 _lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 _lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
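# Hedged illustration of the add_noise() math above: a noisy sample is
# sqrt(alpha_bar_t) * x0 + sqrt(1 - alpha_bar_t) * noise. The beta schedule,
# tensor shapes and timestep here are illustrative assumptions.
import torch

alphas_cumprod = torch.cumprod(1.0 - torch.linspace(1e-4, 0.02, 1000), dim=0)
x0 = torch.randn(1, 3, 8, 8)
noise = torch.randn_like(x0)
t = 500
sqrt_alpha = alphas_cumprod[t] ** 0.5
sqrt_one_minus_alpha = (1 - alphas_cumprod[t]) ** 0.5
x_t = sqrt_alpha * x0 + sqrt_one_minus_alpha * noise
print(x_t.shape)  # torch.Size([1, 3, 8, 8])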
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A_ ( _a ): lowerCAmelCase__ = ['image_processor', 'tokenizer'] lowerCAmelCase__ = 'LayoutLMv2ImageProcessor' lowerCAmelCase__ = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast') def __init__( self: List[str] ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: int=None ,**__lowerCAmelCase: int ): '''simple docstring''' if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." ,__lowerCAmelCase ,) _lowerCamelCase : Optional[Any] = kwargs.pop("feature_extractor" ) _lowerCamelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__lowerCAmelCase ,__lowerCAmelCase ) def __call__( self: int ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,__lowerCAmelCase: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None ,__lowerCAmelCase: Union[List[List[int]], List[List[List[int]]]] = None ,__lowerCAmelCase: Optional[Union[List[int], List[List[int]]]] = None ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Union[bool, str, PaddingStrategy] = False ,__lowerCAmelCase: Union[bool, str, TruncationStrategy] = None ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: int = 0 ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Optional[bool] = None ,__lowerCAmelCase: Optional[bool] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[Union[str, TensorType]] = None ,**__lowerCAmelCase: List[Any] ,): '''simple docstring''' if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( "You cannot provide bounding boxes " "if you initialized the image processor with apply_ocr set to True." ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." 
) # first, apply the image processor _lowerCamelCase : List[Any] = self.image_processor(images=__lowerCAmelCase ,return_tensors=__lowerCAmelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : Tuple = [text] # add batch dimension (as the image processor always adds a batch dimension) _lowerCamelCase : Any = features["words"] _lowerCamelCase : List[str] = self.tokenizer( text=text if text is not None else features["words"] ,text_pair=text_pair if text_pair is not None else None ,boxes=boxes if boxes is not None else features["boxes"] ,word_labels=__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase ,padding=__lowerCAmelCase ,truncation=__lowerCAmelCase ,max_length=__lowerCAmelCase ,stride=__lowerCAmelCase ,pad_to_multiple_of=__lowerCAmelCase ,return_token_type_ids=__lowerCAmelCase ,return_attention_mask=__lowerCAmelCase ,return_overflowing_tokens=__lowerCAmelCase ,return_special_tokens_mask=__lowerCAmelCase ,return_offsets_mapping=__lowerCAmelCase ,return_length=__lowerCAmelCase ,verbose=__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ,) # add pixel values _lowerCamelCase : Optional[int] = features.pop("pixel_values" ) if return_overflowing_tokens is True: _lowerCamelCase : int = self.get_overflowing_images(__lowerCAmelCase ,encoded_inputs["overflow_to_sample_mapping"] ) _lowerCamelCase : Optional[int] = images return encoded_inputs def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Tuple = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(__lowerCAmelCase ) != len(__lowerCAmelCase ): raise ValueError( "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got" F""" {len(__lowerCAmelCase )} and {len(__lowerCAmelCase )}""" ) return images_with_overflow def _lowercase ( self: List[str] ,*__lowerCAmelCase: Optional[int] ,**__lowerCAmelCase: Union[str, Any] ): '''simple docstring''' return self.tokenizer.batch_decode(*__lowerCAmelCase ,**__lowerCAmelCase ) def _lowercase ( self: List[str] ,*__lowerCAmelCase: int ,**__lowerCAmelCase: Union[str, Any] ): '''simple docstring''' return self.tokenizer.decode(*__lowerCAmelCase ,**__lowerCAmelCase ) @property def _lowercase ( self: Dict ): '''simple docstring''' return ["input_ids", "bbox", "attention_mask", "image"] @property def _lowercase ( self: Union[str, Any] ): '''simple docstring''' warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." ,__lowerCAmelCase ,) return self.image_processor_class @property def _lowercase ( self: Optional[int] ): '''simple docstring''' warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." ,__lowerCAmelCase ,) return self.image_processor
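# Hedged usage sketch for the processor above. The checkpoint name and image
# path are illustrative assumptions; with apply_ocr=True (the default) the
# image processor supplies words and boxes itself, which requires Tesseract
# via pytesseract to be installed.
from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
document = Image.open("document.png").convert("RGB")
encoding = processor(document, return_tensors="pt")
print(encoding.keys())  # e.g. input_ids, bbox, attention_mask, image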
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Dict = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : str = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' for attribute in key.split("." ): _lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: _lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: _lowerCamelCase : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowerCamelCase : Tuple = value elif weight_type == "weight_g": _lowerCamelCase : List[str] = value elif weight_type == "weight_v": _lowerCamelCase : List[Any] = value elif weight_type == "bias": _lowerCamelCase : str = value elif weight_type == "running_mean": _lowerCamelCase : Optional[int] = value elif weight_type == "running_var": _lowerCamelCase : Optional[Any] = value elif weight_type == "num_batches_tracked": _lowerCamelCase : int = value elif weight_type == "inv_freq": _lowerCamelCase : List[str] = value else: _lowerCamelCase : Optional[Any] = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Dict = [] _lowerCamelCase : Optional[Any] = fairseq_model.state_dict() _lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): _lowerCamelCase : Dict = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , ) _lowerCamelCase : List[Any] = True else: for key, mapped_key in MAPPING.items(): _lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _lowerCamelCase : int = True if "*" in mapped_key: _lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2] _lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase ) if "pos_bias_u" in name: _lowerCamelCase : int = None elif "pos_bias_v" in name: _lowerCamelCase : Any = None elif "weight_g" in name: _lowerCamelCase : Any = "weight_g" elif "weight_v" in name: _lowerCamelCase : Any = "weight_v" elif "bias" in name: _lowerCamelCase : Optional[Any] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj _lowerCamelCase : Dict = "weight" elif "running_mean" in name: _lowerCamelCase : str = "running_mean" elif "inv_freq" in name: _lowerCamelCase : List[Any] = "inv_freq" elif "running_var" in name: _lowerCamelCase : Tuple = "running_var" elif "num_batches_tracked" in name: _lowerCamelCase : str = "num_batches_tracked" else: _lowerCamelCase : Dict = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : int = full_name.split("conv_layers." )[-1] _lowerCamelCase : List[Any] = name.split("." 
) _lowerCamelCase : Union[str, Any] = int(items[0] ) _lowerCamelCase : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowerCamelCase : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowerCamelCase : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) _lowerCamelCase : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowerCamelCase : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCamelCase ) @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict: '''simple docstring''' if config_path is not None: _lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" ) else: _lowerCamelCase : Dict = WavaVecaConformerConfig() if "rope" in checkpoint_path: _lowerCamelCase : List[Any] = "rotary" if is_finetuned: if dict_path: _lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowerCamelCase : Optional[int] = target_dict.pad_index _lowerCamelCase : Dict = target_dict.bos_index _lowerCamelCase : Optional[Any] = target_dict.eos_index _lowerCamelCase : str = len(target_dict.symbols ) _lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" ) if not os.path.isdir(_lowerCamelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) _lowerCamelCase : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched _lowerCamelCase : List[str] = 0 _lowerCamelCase : List[Any] = 1 with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , ) _lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False _lowerCamelCase : 
Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) _lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) _lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase ) else: _lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase ) if is_finetuned: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" ) _lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase ) _lowerCamelCase : Dict = model[0].eval() recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned ) hf_wavavec.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) _lowerCAmelCase : str = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
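# Hedged mini-example of the recursive attribute walk used by set_recursively
# above: dotted checkpoint keys are resolved one attribute at a time with
# getattr. The tiny model here is an illustrative assumption.
import torch

tiny = torch.nn.Sequential(torch.nn.Linear(2, 2))
pointer = tiny
for attribute in "0.weight".split("."):
    pointer = getattr(pointer, attribute)
print(pointer.shape)  # torch.Size([2, 2])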
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py _lowerCAmelCase : Dict = '''.''' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) _lowerCAmelCase : List[Any] = [ '''Assert''', '''AssignVariableOp''', '''EmptyTensorList''', '''MergeV2Checkpoints''', '''ReadVariableOp''', '''ResourceGather''', '''RestoreV2''', '''SaveV2''', '''ShardedFilename''', '''StatefulPartitionedCall''', '''StaticRegexFullMatch''', '''VarHandleOp''', ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Any = SavedModel() _lowerCamelCase : Optional[int] = [] with open(os.path.join(_lowerCamelCase , "utils" , "tf_ops" , "onnx.json" ) ) as f: _lowerCamelCase : List[Any] = json.load(_lowerCamelCase )["opsets"] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(_lowerCamelCase )] ) with open(_lowerCamelCase , "rb" ) as f: saved_model.ParseFromString(f.read() ) _lowerCamelCase : List[Any] = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want _lowerCamelCase : Optional[Any] = sorted(_lowerCamelCase ) _lowerCamelCase : Optional[int] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(_lowerCamelCase ) if strict and len(_lowerCamelCase ) > 0: raise Exception(F"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops ) elif len(_lowerCamelCase ) > 0: print(F"""Found the following incompatible ops for the opset {opset}:""" ) print(*_lowerCamelCase , sep="\n" ) else: print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""" ) if __name__ == "__main__": _lowerCAmelCase : str = argparse.ArgumentParser() parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''') parser.add_argument( '''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.''' ) parser.add_argument( '''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.''' ) parser.add_argument( '''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)''' ) _lowerCAmelCase : str = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(a - b ) for a, b in zip(_lowerCamelCase , _lowerCamelCase ) ) ) def lowerCamelCase_( _lowerCamelCase ) -> None: '''simple docstring''' if point: if isinstance(_lowerCamelCase , _lowerCamelCase ): for item in point: if not isinstance(_lowerCamelCase , (int, float) ): _lowerCamelCase : Dict = ( "Expected a list of numbers as input, found " F"""{type(_lowerCamelCase ).__name__}""" ) raise TypeError(_lowerCamelCase ) else: _lowerCamelCase : Optional[int] = F"""Expected a list of numbers as input, found {type(_lowerCamelCase ).__name__}""" raise TypeError(_lowerCamelCase ) else: raise ValueError("Missing an input" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float: '''simple docstring''' _validate_point(_lowerCamelCase ) _validate_point(_lowerCamelCase ) if len(_lowerCamelCase ) != len(_lowerCamelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(x - y ) for x, y in zip(_lowerCamelCase , _lowerCamelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList _lowerCAmelCase : str = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif'''] class A_ ( _a ): def __init__( self: Tuple ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Union[str, Any]=1 ): '''simple docstring''' _lowerCamelCase : List[str] = tokenizer _lowerCamelCase : Any = dataset _lowerCamelCase : Dict = len(__lowerCAmelCase ) if n_tasks is None else n_tasks _lowerCamelCase : str = n_copies def __iter__( self: List[Any] ): '''simple docstring''' _lowerCamelCase : str = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() ) _lowerCamelCase : int = self.tokenizer(__lowerCAmelCase ,padding=__lowerCAmelCase ,return_tensors="pt" ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class A_ ( _a ): def __init__( self: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase : Dict = start_length _lowerCamelCase : List[Any] = eof_strings _lowerCamelCase : Optional[Any] = tokenizer def __call__( self: List[Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Optional[Any] ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' _lowerCamelCase : str = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) _lowerCamelCase : Optional[Any] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(__lowerCAmelCase ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : List[str] = re.split("(%s)" % "|".join(_lowerCamelCase ) , _lowerCamelCase ) # last string should be "" return "".join(string_list[:-2] ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=20 , **_lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : List[Any] = defaultdict(_lowerCamelCase ) # dict of list of generated tokens for step, batch in tqdm(enumerate(_lowerCamelCase ) ): with torch.no_grad(): _lowerCamelCase : Dict = batch["ids"].shape[-1] _lowerCamelCase : List[str] = accelerator.unwrap_model(_lowerCamelCase ).generate( input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase ) # each task is generated batch_size times _lowerCamelCase : Dict = batch["task_id"].repeat(_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = accelerator.pad_across_processes( _lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id ) _lowerCamelCase, _lowerCamelCase : List[Any] = accelerator.gather((generated_tokens, generated_tasks) ) _lowerCamelCase : Any = generated_tokens.cpu().numpy() _lowerCamelCase : Optional[Any] = 
generated_tasks.cpu().numpy() for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ): gen_token_dict[task].append(_lowerCamelCase ) _lowerCamelCase : str = [[] for _ in range(_lowerCamelCase )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: _lowerCamelCase : Any = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase ) code_gens[task].append(remove_last_block(_lowerCamelCase ) ) return code_gens def lowerCamelCase_( ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Any = HfArgumentParser(_lowerCamelCase ) _lowerCamelCase : Tuple = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric _lowerCamelCase : Tuple = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing _lowerCamelCase : str = "false" if args.num_workers is None: _lowerCamelCase : Any = multiprocessing.cpu_count() # Use dataset load to feed to accelerate _lowerCamelCase : int = Accelerator() set_seed(args.seed , device_specific=_lowerCamelCase ) # Load model and tokenizer _lowerCamelCase : Any = AutoTokenizer.from_pretrained(args.model_ckpt ) _lowerCamelCase : List[str] = tokenizer.eos_token _lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings _lowerCamelCase : Optional[int] = { "do_sample": args.do_sample, "temperature": args.temperature, "max_new_tokens": args.max_new_tokens, "top_p": args.top_p, "top_k": args.top_k, "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ), } # Load evaluation dataset and metric _lowerCamelCase : int = load_dataset("openai_humaneval" ) _lowerCamelCase : Optional[int] = load_metric("code_eval" ) _lowerCamelCase : Union[str, Any] = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] ) _lowerCamelCase : Tuple = args.n_samples // args.batch_size _lowerCamelCase : Union[str, Any] = TokenizedDataset(_lowerCamelCase , human_eval["test"] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase ) # do not confuse args.batch_size, which is actually the num_return_sequences _lowerCamelCase : Tuple = DataLoader(_lowerCamelCase , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: _lowerCamelCase : str = code_eval_metric.compute(references=[""] , predictions=[[""]] ) except ValueError as exception: print( "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`" " flag to enable code evaluation." 
) raise exception _lowerCamelCase, _lowerCamelCase : List[str] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : List[Any] = complete_code( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , ) if accelerator.is_main_process: _lowerCamelCase : List[str] = [] for task in tqdm(range(_lowerCamelCase ) ): _lowerCamelCase : Optional[Any] = human_eval["test"][task]["test"] _lowerCamelCase : List[str] = F"""check({human_eval['test'][task]['entry_point']})""" references.append("\n" + test_func + "\n" + entry_point ) # Evaluate completions with "code_eval" metric _lowerCamelCase, _lowerCamelCase : Tuple = code_eval_metric.compute( references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers ) print(F"""Results: {pass_at_k}""" ) # Save results to json file with open(args.output_file , "w" ) as fp: json.dump(_lowerCamelCase , _lowerCamelCase ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
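# Hedged mini-example of the remove_last_block() helper used above: generated
# code is split at the first end-of-completion marker and the trailing block
# (plus the marker itself) is dropped.
import re

EOF_STRINGS_DEMO = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


def remove_last_block(string):
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS_DEMO), string)
    return "".join(string_list[:-2])


print(repr(remove_last_block("    return a + b\nclass Foo:\n    pass")))
# '    return a + b'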
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Optional[Any] = np.inf def set_batch_size(_lowerCamelCase ) -> None: nonlocal batch_size if isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Optional[int] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_lowerCamelCase , _lowerCamelCase ) and feature.dtype == "binary": _lowerCamelCase : List[str] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_lowerCamelCase , _lowerCamelCase ) return None if batch_size is np.inf else batch_size class A_ ( _a ): def __init__( self: Optional[int] ,__lowerCAmelCase: NestedDataStructureLike[PathLike] ,__lowerCAmelCase: Optional[NamedSplit] = None ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: int ,): '''simple docstring''' super().__init__( __lowerCAmelCase ,split=__lowerCAmelCase ,features=__lowerCAmelCase ,cache_dir=__lowerCAmelCase ,keep_in_memory=__lowerCAmelCase ,streaming=__lowerCAmelCase ,num_proc=__lowerCAmelCase ,**__lowerCAmelCase ,) _lowerCamelCase : Tuple = path_or_paths if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else {self.split: path_or_paths} _lowerCamelCase : Any = _PACKAGED_DATASETS_MODULES["parquet"][1] _lowerCamelCase : int = Parquet( cache_dir=__lowerCAmelCase ,data_files=__lowerCAmelCase ,features=__lowerCAmelCase ,hash=__lowerCAmelCase ,**__lowerCAmelCase ,) def _lowercase ( self: Optional[int] ): '''simple docstring''' if self.streaming: _lowerCamelCase : List[Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _lowerCamelCase : Tuple = None _lowerCamelCase : Optional[int] = None _lowerCamelCase : List[str] = None _lowerCamelCase : str = None self.builder.download_and_prepare( download_config=__lowerCAmelCase ,download_mode=__lowerCAmelCase ,verification_mode=__lowerCAmelCase ,base_path=__lowerCAmelCase ,num_proc=self.num_proc ,) _lowerCamelCase : Any = self.builder.as_dataset( split=self.split ,verification_mode=__lowerCAmelCase ,in_memory=self.keep_in_memory ) return dataset class A_ : def __init__( self: str ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: Union[PathLike, BinaryIO] ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: List[Any] ,): '''simple docstring''' _lowerCamelCase : Any = dataset _lowerCamelCase : Any = path_or_buf _lowerCamelCase : Any = batch_size or get_writer_batch_size(dataset.features ) _lowerCamelCase : List[str] = parquet_writer_kwargs def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf ,(str, 
bytes, os.PathLike) ): with open(self.path_or_buf ,"wb+" ) as buffer: _lowerCamelCase : str = self._write(file_obj=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) else: _lowerCamelCase : Optional[int] = self._write(file_obj=self.path_or_buf ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs ) return written def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: BinaryIO ,__lowerCAmelCase: int ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : List[str] = 0 _lowerCamelCase : Optional[int] = parquet_writer_kwargs.pop("path_or_buf" ,__lowerCAmelCase ) _lowerCamelCase : List[str] = self.dataset.features.arrow_schema _lowerCamelCase : str = pq.ParquetWriter(__lowerCAmelCase ,schema=__lowerCAmelCase ,**__lowerCAmelCase ) for offset in logging.tqdm( range(0 ,len(self.dataset ) ,__lowerCAmelCase ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating parquet from Arrow format" ,): _lowerCamelCase : List[str] = query_table( table=self.dataset._data ,key=slice(__lowerCAmelCase ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,) writer.write_table(__lowerCAmelCase ) written += batch.nbytes writer.close() return written
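# Hedged usage sketch of the reader/writer pair above, through the public
# Dataset API that wraps them (the file path is an illustrative assumption).
from datasets import Dataset

tiny_ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
tiny_ds.to_parquet("tiny.parquet")
round_tripped = Dataset.from_parquet("tiny.parquet")
print(round_tripped.features)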
340
1
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase ) -> list: '''simple docstring''' if n_term == "": return [] _lowerCamelCase : list = [] for temp in range(int(_lowerCamelCase ) ): series.append(F"""1/{temp + 1}""" if series else "1" ) return series if __name__ == "__main__": _lowerCAmelCase : Tuple = input('''Enter the last number (nth term) of the Harmonic Series''') print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''') print(harmonic_series(nth_term))
340
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Optional[int] = {} _lowerCamelCase : Optional[int] = tokenizer(example["content"] , truncation=_lowerCamelCase )["input_ids"] _lowerCamelCase : Dict = len(example["content"] ) / len(output["input_ids"] ) return output _lowerCAmelCase : Tuple = HfArgumentParser(PretokenizationArguments) _lowerCAmelCase : Optional[int] = parser.parse_args() if args.num_workers is None: _lowerCAmelCase : Any = multiprocessing.cpu_count() _lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir) _lowerCAmelCase : Union[str, Any] = time.time() _lowerCAmelCase : Optional[int] = load_dataset(args.dataset_name, split='''train''') print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') _lowerCAmelCase : Any = time.time() _lowerCAmelCase : Dict = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ '''repo_name''', '''path''', '''copies''', '''size''', '''content''', '''license''', '''hash''', '''line_mean''', '''line_max''', '''alpha_frac''', '''autogenerated''', ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') _lowerCAmelCase : str = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
340
1
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue_model_parallelism.py', 'model_name_or_path': 'roberta-large', 'instance_type': 'ml.p3dn.24xlarge', 'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2}, }, { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'roberta-large', 'instance_type': 'ml.p3dn.24xlarge', 'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2}, }, ] ) class A_ ( unittest.TestCase ): def _lowercase ( self: Any ): '''simple docstring''' if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding="utf-8" ,check=__lowerCAmelCase ,) assert hasattr(self ,"env" ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : List[Any] = { "enabled": True, "processes_per_host": 8, } _lowerCamelCase : List[Any] = { "enabled": True, "parameters": { "microbatches": 4, "placement_strategy": "spread", "pipeline": "interleaved", "optimize": "speed", "partitions": 4, "ddp": True, }, } _lowerCamelCase : Tuple = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options} _lowerCamelCase : List[str] = "trainer" if self.script == "run_glue.py" else "smtrainer" # creates estimator return HuggingFace( entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,instance_count=__lowerCAmelCase ,instance_type=self.instance_type ,debugger_hook_config=__lowerCAmelCase ,hyperparameters={ **self.env.hyperparameters, "model_name_or_path": self.model_name_or_path, "max_steps": 500, } ,metric_definitions=self.env.metric_definitions ,distribution=__lowerCAmelCase ,py_version="py36" ,) def _lowercase ( self: List[str] ,__lowerCAmelCase: List[Any] ): '''simple docstring''' TrainingJobAnalytics(__lowerCAmelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = self.create_estimator(__lowerCAmelCase ) # run training estimator.fit() # result dataframe _lowerCamelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis _lowerCamelCase : str = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] ) _lowerCamelCase : List[str] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping _lowerCamelCase : Tuple = ( Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" ,999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] 
for t in eval_accuracy ) assert all(t <= self.results["eval_loss"] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" ,"w" ) as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} ,__lowerCAmelCase )
340
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Optional[Any] = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
1
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Optional[int] = [False] * len(_lowerCamelCase ) _lowerCamelCase : List[Any] = [] queue.append(_lowerCamelCase ) _lowerCamelCase : int = True while queue: _lowerCamelCase : int = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_lowerCamelCase ) _lowerCamelCase : List[Any] = True _lowerCamelCase : Optional[Any] = u return visited[t] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : List[str] = [-1] * (len(_lowerCamelCase )) _lowerCamelCase : Any = 0 while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = float("Inf" ) _lowerCamelCase : int = sink while s != source: # Find the minimum value in select path _lowerCamelCase : Optional[int] = min(_lowerCamelCase , graph[parent[s]][s] ) _lowerCamelCase : str = parent[s] max_flow += path_flow _lowerCamelCase : Tuple = sink while v != source: _lowerCamelCase : str = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _lowerCamelCase : Union[str, Any] = parent[v] return max_flow _lowerCAmelCase : Any = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = 0, 5 print(ford_fulkerson(graph, source, sink))
340
"""simple docstring""" import logging from transformers.configuration_utils import PretrainedConfig _lowerCAmelCase : Optional[Any] = logging.getLogger(__name__) class A_ ( _a ): lowerCAmelCase__ = 'masked_bert' def __init__( self: Union[str, Any] ,__lowerCAmelCase: Dict=30_522 ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Dict=12 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: List[Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: str=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-12 ,__lowerCAmelCase: Union[str, Any]=0 ,__lowerCAmelCase: List[Any]="topK" ,__lowerCAmelCase: Optional[Any]="constant" ,__lowerCAmelCase: Optional[Any]=0.0 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = hidden_size _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : Tuple = num_attention_heads _lowerCamelCase : Optional[Any] = hidden_act _lowerCamelCase : Optional[Any] = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : str = max_position_embeddings _lowerCamelCase : List[str] = type_vocab_size _lowerCamelCase : Optional[int] = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : int = pruning_method _lowerCamelCase : str = mask_init _lowerCamelCase : List[Any] = mask_scale
340
1
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : int = logging.get_logger(__name__) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]: '''simple docstring''' _lowerCamelCase : Tuple = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("head" ): _lowerCamelCase : Tuple = "segformer.encoder." + key if key.startswith("backbone" ): _lowerCamelCase : Any = key.replace("backbone" , "segformer.encoder" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _lowerCamelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )] _lowerCamelCase : int = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_lowerCamelCase )-1}""" ) if "norm" in key: _lowerCamelCase : Optional[Any] = key.replace("norm" , "layer_norm" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _lowerCamelCase : Dict = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )] _lowerCamelCase : Tuple = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_lowerCamelCase )-1}""" ) if "layer_norm1" in key: _lowerCamelCase : Union[str, Any] = key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: _lowerCamelCase : int = key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 _lowerCamelCase : Union[str, Any] = key[key.find("block" ) + len("block" )] _lowerCamelCase : Optional[Any] = key.replace(F"""block{idx}""" , F"""block.{int(_lowerCamelCase )-1}""" ) if "attn.q" in key: _lowerCamelCase : Optional[int] = key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: _lowerCamelCase : List[str] = key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: _lowerCamelCase : Tuple = key.replace("attn" , "attention.self" ) if "fc1" in key: _lowerCamelCase : Optional[Any] = key.replace("fc1" , "dense1" ) if "fc2" in key: _lowerCamelCase : Dict = key.replace("fc2" , "dense2" ) if "linear_pred" in key: _lowerCamelCase : int = key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: _lowerCamelCase : str = key.replace("linear_fuse.conv" , "linear_fuse" ) _lowerCamelCase : Optional[Any] = key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _lowerCamelCase : Union[str, Any] = key[key.find("linear_c" ) + len("linear_c" )] _lowerCamelCase : Optional[int] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_lowerCamelCase )-1}""" ) if key.startswith("head" ): _lowerCamelCase : List[str] = key.replace("head" , "classifier" ) _lowerCamelCase : Union[str, Any] = value return new_state_dict def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" ) _lowerCamelCase : Optional[Any] = 
state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict _lowerCamelCase : int = kv_weight[ : config.hidden_sizes[i], : ] _lowerCamelCase : int = kv_bias[: config.hidden_sizes[i]] _lowerCamelCase : Optional[int] = kv_weight[ config.hidden_sizes[i] :, : ] _lowerCamelCase : Optional[Any] = kv_bias[ config.hidden_sizes[i] : ] def lowerCamelCase_( ) -> Dict: '''simple docstring''' _lowerCamelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCamelCase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return image @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : Any = SegformerConfig() _lowerCamelCase : int = False # set attributes based on model_name _lowerCamelCase : Any = "huggingface/label-files" if "segformer" in model_name: _lowerCamelCase : str = model_name[len("segformer." ) : len("segformer." ) + 2] if "ade" in model_name: _lowerCamelCase : str = 150 _lowerCamelCase : Dict = "ade20k-id2label.json" _lowerCamelCase : Dict = (1, 150, 128, 128) elif "city" in model_name: _lowerCamelCase : List[str] = 19 _lowerCamelCase : Tuple = "cityscapes-id2label.json" _lowerCamelCase : Tuple = (1, 19, 128, 128) else: raise ValueError(F"""Model {model_name} not supported""" ) elif "mit" in model_name: _lowerCamelCase : List[str] = True _lowerCamelCase : Tuple = model_name[4:6] _lowerCamelCase : Tuple = 1000 _lowerCamelCase : List[Any] = "imagenet-1k-id2label.json" _lowerCamelCase : List[Any] = (1, 1000) else: raise ValueError(F"""Model {model_name} not supported""" ) # set config attributes _lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowerCamelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase : Optional[Any] = idalabel _lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : int = 256 elif size == "b2": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : List[Any] = 768 _lowerCamelCase : Any = [3, 4, 6, 3] elif size == "b3": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : Union[str, Any] = 768 _lowerCamelCase : Optional[Any] = [3, 4, 18, 3] elif size == "b4": _lowerCamelCase : str = [64, 128, 320, 512] _lowerCamelCase : Optional[Any] = 768 _lowerCamelCase : Dict = [3, 8, 27, 3] elif size == "b5": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : Tuple = 768 _lowerCamelCase : Tuple = [3, 6, 40, 3] else: raise ValueError(F"""Size {size} not supported""" ) # load image processor (only resize + normalize) _lowerCamelCase : Dict = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase ) # prepare image _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values logger.info(F"""Converting model {model_name}...""" ) # load original state dict if encoder_only: _lowerCamelCase : Tuple = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) ) else: _lowerCamelCase : int = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )["state_dict"] # rename keys _lowerCamelCase : str = rename_keys(_lowerCamelCase , 
encoder_only=_lowerCamelCase ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(_lowerCamelCase , _lowerCamelCase ) # create HuggingFace model and load state dict if encoder_only: _lowerCamelCase : Tuple = False _lowerCamelCase : Optional[int] = SegformerForImageClassification(_lowerCamelCase ) else: _lowerCamelCase : List[str] = SegformerForSemanticSegmentation(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) model.eval() # forward pass _lowerCamelCase : Any = model(_lowerCamelCase ) _lowerCamelCase : Dict = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": _lowerCamelCase : str = torch.tensor( [ [[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]], [[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]], [[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]], [[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]], [[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": _lowerCamelCase : int = torch.tensor( [ [[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]], [[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]], [[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": _lowerCamelCase : Optional[Any] = torch.tensor( [ [[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]], [[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]], [[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]], [[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]], [[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]], [[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]], 
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": _lowerCamelCase : Dict = torch.tensor( [ [[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]], [[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]], [[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": _lowerCamelCase : Optional[int] = torch.tensor( [ [[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]], [[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]], [[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [ [-1.13_72e01, -1.27_87e01, -1.34_77e01], [-1.25_36e01, -1.41_94e01, -1.44_09e01], [-1.32_17e01, -1.48_88e01, -1.53_27e01], ], [ [-1.47_91e01, -1.71_22e01, -1.82_77e01], [-1.71_63e01, -1.91_92e01, -1.95_33e01], [-1.78_97e01, -1.99_91e01, -2.03_15e01], ], [ [7.67_23e-01, 4.19_21e-01, -7.78_78e-02], [4.77_72e-01, 9.55_57e-03, -2.80_82e-01], [3.60_32e-01, -2.48_26e-01, -5.11_68e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": _lowerCamelCase : Union[str, Any] = torch.tensor( [ [[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]], [[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]], [[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": _lowerCamelCase : List[Any] = torch.tensor( [ [[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]], [[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]], [[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]], [[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]], [[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": _lowerCamelCase : Any = torch.tensor( [ [[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]], [[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]], [[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]], ] ) 
elif model_name == "segformer.b4.1024x1024.city.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]], [[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]], [[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": _lowerCamelCase : str = torch.tensor( [ [[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]], [[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]], [[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]], ] ) else: _lowerCamelCase : Dict = logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-2 ) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''segformer.b0.512x512.ade.160k''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowerCAmelCase : str = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
340
"""simple docstring""" import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model _lowerCAmelCase : str = '''0.12''' # assumed parallelism: 8 if is_torch_available(): import torch def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) -> List[Any]: '''simple docstring''' if rng is None: _lowerCamelCase : Union[str, Any] = random.Random() _lowerCamelCase : Union[str, Any] = 1 for dim in shape: total_dims *= dim _lowerCamelCase : Optional[int] = [] for _ in range(_lowerCamelCase ): values.append(rng.randint(0 , vocab_size - 1 ) ) _lowerCamelCase : Union[str, Any] = np.array(_lowerCamelCase , dtype=jnp.intaa ).reshape(_lowerCamelCase ) return output def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Optional[int] = ids_tensor(_lowerCamelCase , vocab_size=2 , rng=_lowerCamelCase ) # make sure that at least one token is attended to for each batch _lowerCamelCase : List[str] = 1 return attn_mask @require_flax class A_ : lowerCAmelCase__ = None lowerCAmelCase__ = () def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 _lowerCamelCase : List[str] = 2 _lowerCamelCase : str = inputs["input_ids"].shape[-1] // 2 _lowerCamelCase : Tuple = inputs["input_ids"][:max_batch_size, :sequence_length] _lowerCamelCase : Any = jnp.ones_like(__lowerCAmelCase ) _lowerCamelCase : List[Any] = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens _lowerCamelCase : Optional[Any] = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` _lowerCamelCase : List[str] = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = False _lowerCamelCase : Dict = max_length _lowerCamelCase : Tuple = 0 for model_class in self.all_generative_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning _lowerCamelCase : Any = getattr(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCamelCase : Dict = pt_model_class(__lowerCAmelCase ).eval() _lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCAmelCase ,flax_model.params ) _lowerCamelCase : int = flax_model.generate(__lowerCAmelCase ).sequences _lowerCamelCase : Optional[int] = pt_model.generate(torch.tensor(__lowerCAmelCase ,dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: _lowerCamelCase : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, 
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = self._get_input_ids_and_config() _lowerCamelCase : Union[str, Any] = False _lowerCamelCase : Union[str, Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[Any] = True _lowerCamelCase : Optional[int] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : int = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config() _lowerCamelCase : int = False _lowerCamelCase : Optional[Any] = max_length _lowerCamelCase : Dict = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[str] = model_class(__lowerCAmelCase ) _lowerCamelCase : Dict = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = self._get_input_ids_and_config() _lowerCamelCase : Tuple = False _lowerCamelCase : Union[str, Any] = max_length _lowerCamelCase : List[str] = 2 _lowerCamelCase : Optional[int] = 2 for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : str = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() _lowerCamelCase : int = True _lowerCamelCase : List[Any] = max_length _lowerCamelCase : Optional[Any] = 0.8 _lowerCamelCase : Union[str, Any] = 10 _lowerCamelCase : List[str] = 0.3 _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : str = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ) _lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : int = jit(model.generate ) _lowerCamelCase : Optional[int] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() 
,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = self._get_input_ids_and_config() _lowerCamelCase : List[str] = max_length _lowerCamelCase : Tuple = 1 _lowerCamelCase : Any = 8 _lowerCamelCase : Dict = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : Any = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : Any = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() _lowerCamelCase : Dict = max_length _lowerCamelCase : List[Any] = 2 _lowerCamelCase : Tuple = 1 _lowerCamelCase : List[str] = 8 _lowerCamelCase : List[Any] = 9 for model_class in self.all_generative_model_classes: _lowerCamelCase : int = model_class(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Tuple = jit(model.generate ) _lowerCamelCase : Optional[Any] = jit_generate(__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Tuple = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : Dict = False _lowerCamelCase : Any = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[Any] = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Any = jit(model.generate ) _lowerCamelCase : List[Any] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config() # pad attention mask on the left _lowerCamelCase : List[str] = 
attention_mask.at[(0, 0)].set(0 ) _lowerCamelCase : int = 2 _lowerCamelCase : int = max_length for model_class in self.all_generative_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase ) _lowerCamelCase : Dict = jit(model.generate ) _lowerCamelCase : Dict = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() ) @require_flax class A_ ( unittest.TestCase ): def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" ) _lowerCamelCase : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) _lowerCamelCase : Optional[Any] = "Hello world" _lowerCamelCase : str = tokenizer(__lowerCAmelCase ,return_tensors="np" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__lowerCAmelCase ,"do_samples" ): model.generate(__lowerCAmelCase ,do_samples=__lowerCAmelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__lowerCAmelCase ,"foo" ): _lowerCamelCase : List[str] = {"foo": "bar"} model.generate(__lowerCAmelCase ,**__lowerCAmelCase )
340
1
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Dict = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : str = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' for attribute in key.split("." ): _lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: _lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: _lowerCamelCase : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowerCamelCase : Tuple = value elif weight_type == "weight_g": _lowerCamelCase : List[str] = value elif weight_type == "weight_v": _lowerCamelCase : List[Any] = value elif weight_type == "bias": _lowerCamelCase : str = value elif weight_type == "running_mean": _lowerCamelCase : Optional[int] = value elif weight_type == "running_var": _lowerCamelCase : Optional[Any] = value elif weight_type == "num_batches_tracked": _lowerCamelCase : int = value elif weight_type == "inv_freq": _lowerCamelCase : List[str] = value else: _lowerCamelCase : Optional[Any] = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Dict = [] _lowerCamelCase : Optional[Any] = fairseq_model.state_dict() _lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): _lowerCamelCase : Dict = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , ) _lowerCamelCase : List[Any] = True else: for key, mapped_key in MAPPING.items(): _lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _lowerCamelCase : int = True if "*" in mapped_key: _lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2] _lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase ) if "pos_bias_u" in name: _lowerCamelCase : int = None elif "pos_bias_v" in name: _lowerCamelCase : Any = None elif "weight_g" in name: _lowerCamelCase : Any = "weight_g" elif "weight_v" in name: _lowerCamelCase : Any = "weight_v" elif "bias" in name: _lowerCamelCase : Optional[Any] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj _lowerCamelCase : Dict = "weight" elif "running_mean" in name: _lowerCamelCase : str = "running_mean" elif "inv_freq" in name: _lowerCamelCase : List[Any] = "inv_freq" elif "running_var" in name: _lowerCamelCase : Tuple = "running_var" elif "num_batches_tracked" in name: _lowerCamelCase : str = "num_batches_tracked" else: _lowerCamelCase : Dict = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : int = full_name.split("conv_layers." )[-1] _lowerCamelCase : List[Any] = name.split("." 
) _lowerCamelCase : Union[str, Any] = int(items[0] ) _lowerCamelCase : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowerCamelCase : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowerCamelCase : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) _lowerCamelCase : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowerCamelCase : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCamelCase ) @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict: '''simple docstring''' if config_path is not None: _lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" ) else: _lowerCamelCase : Dict = WavaVecaConformerConfig() if "rope" in checkpoint_path: _lowerCamelCase : List[Any] = "rotary" if is_finetuned: if dict_path: _lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowerCamelCase : Optional[int] = target_dict.pad_index _lowerCamelCase : Dict = target_dict.bos_index _lowerCamelCase : Optional[Any] = target_dict.eos_index _lowerCamelCase : str = len(target_dict.symbols ) _lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" ) if not os.path.isdir(_lowerCamelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) _lowerCamelCase : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched _lowerCamelCase : List[str] = 0 _lowerCamelCase : List[Any] = 1 with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , ) _lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False _lowerCamelCase : 
Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) _lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) _lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase ) else: _lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase ) if is_finetuned: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" ) _lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase ) _lowerCamelCase : Dict = model[0].eval() recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned ) hf_wavavec.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) _lowerCAmelCase : str = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
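# Editor's sketch (added for illustration, not part of the original script):
# converting a pretrained (not fine-tuned) fairseq checkpoint would use the
# flags declared above, e.g. (the script file name below is an assumption):
#
#     python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path ./wav2vec2_conformer.pt \
#         --pytorch_dump_folder_path ./wav2vec2-conformer-hf \
#         --not_finetuned
#
# Per the code above, `--dict_path` and `--config_path` are only needed for
# fine-tuned CTC checkpoints.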
340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : int = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class A_ ( _a ): lowerCAmelCase__ = 'mobilenet_v1' def __init__( self: Tuple ,__lowerCAmelCase: int=3 ,__lowerCAmelCase: Dict=224 ,__lowerCAmelCase: int=1.0 ,__lowerCAmelCase: Tuple=8 ,__lowerCAmelCase: List[str]="relu6" ,__lowerCAmelCase: int=True ,__lowerCAmelCase: List[Any]=0.9_99 ,__lowerCAmelCase: Optional[int]=0.02 ,__lowerCAmelCase: Optional[int]=0.0_01 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) _lowerCamelCase : List[str] = num_channels _lowerCamelCase : Union[str, Any] = image_size _lowerCamelCase : List[Any] = depth_multiplier _lowerCamelCase : Any = min_depth _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Dict = tf_padding _lowerCamelCase : Union[str, Any] = classifier_dropout_prob _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps class A_ ( _a ): lowerCAmelCase__ = version.parse('1.11' ) @property def _lowercase ( self: Optional[int] ): '''simple docstring''' return OrderedDict([("pixel_values", {0: "batch"})] ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def _lowercase ( self: Any ): '''simple docstring''' return 1e-4
340
1
"""simple docstring""" import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow _lowerCAmelCase : Optional[int] = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ '''text-classification''', '''language-modeling''', '''summarization''', '''token-classification''', '''question-answering''', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) _lowerCAmelCase : int = logging.getLogger() def lowerCamelCase_( ) -> List[Any]: '''simple docstring''' _lowerCamelCase : str = argparse.ArgumentParser() parser.add_argument("-f" ) _lowerCamelCase : Union[str, Any] = parser.parse_args() return args.f def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase="eval" ) -> List[str]: '''simple docstring''' _lowerCamelCase : Tuple = os.path.join(_lowerCamelCase , F"""{split}_results.json""" ) if os.path.exists(_lowerCamelCase ): with open(_lowerCamelCase , "r" ) as f: return json.load(_lowerCamelCase ) raise ValueError(F"""can't find {path}""" ) _lowerCAmelCase : Tuple = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class A_ ( _a ): def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : int = self.get_auto_remove_tmp_dir() _lowerCamelCase : Any = F""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(__lowerCAmelCase ,"argv" ,__lowerCAmelCase ): run_flax_glue.main() _lowerCamelCase : Union[str, Any] = get_results(__lowerCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] ,0.75 ) @slow def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : Optional[int] = self.get_auto_remove_tmp_dir() _lowerCamelCase : int = F""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(__lowerCAmelCase ,"argv" ,__lowerCAmelCase ): run_clm_flax.main() _lowerCamelCase : Optional[int] = get_results(__lowerCAmelCase ) self.assertLess(result["eval_perplexity"] ,100 ) @slow def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : Optional[int] = self.get_auto_remove_tmp_dir() _lowerCamelCase : str = F""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(__lowerCAmelCase ,"argv" ,__lowerCAmelCase ): run_summarization_flax.main() _lowerCamelCase : Dict = 
get_results(__lowerCAmelCase ,split="test" ) self.assertGreaterEqual(result["test_rouge1"] ,10 ) self.assertGreaterEqual(result["test_rouge2"] ,2 ) self.assertGreaterEqual(result["test_rougeL"] ,7 ) self.assertGreaterEqual(result["test_rougeLsum"] ,7 ) @slow def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : Any = self.get_auto_remove_tmp_dir() _lowerCamelCase : Union[str, Any] = F""" run_mlm.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(__lowerCAmelCase ,"argv" ,__lowerCAmelCase ): run_mlm_flax.main() _lowerCamelCase : Dict = get_results(__lowerCAmelCase ) self.assertLess(result["eval_perplexity"] ,42 ) @slow def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase : Optional[int] = self.get_auto_remove_tmp_dir() _lowerCamelCase : Dict = F""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(__lowerCAmelCase ,"argv" ,__lowerCAmelCase ): run_ta_mlm_flax.main() _lowerCamelCase : Optional[Any] = get_results(__lowerCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] ,0.42 ) @slow def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : Optional[int] = 7 if get_gpu_count() > 1 else 2 _lowerCamelCase : Union[str, Any] = self.get_auto_remove_tmp_dir() _lowerCamelCase : int = F""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(__lowerCAmelCase ,"argv" ,__lowerCAmelCase ): run_flax_ner.main() _lowerCamelCase : Dict = get_results(__lowerCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] ,0.75 ) self.assertGreaterEqual(result["eval_f1"] ,0.3 ) @slow def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Any = self.get_auto_remove_tmp_dir() _lowerCamelCase : Union[str, Any] = F""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(__lowerCAmelCase ,"argv" ,__lowerCAmelCase ): run_qa.main() _lowerCamelCase : Optional[Any] = get_results(__lowerCAmelCase ) self.assertGreaterEqual(result["eval_f1"] ,30 ) self.assertGreaterEqual(result["eval_exact"] ,30 )
340
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def lowerCamelCase_( _lowerCamelCase ) -> Any: '''simple docstring''' for param in module.parameters(): _lowerCamelCase : Optional[int] = False def lowerCamelCase_( ) -> List[str]: '''simple docstring''' _lowerCamelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _lowerCamelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : Dict = plt.imshow(_lowerCamelCase ) fig.axes.get_xaxis().set_visible(_lowerCamelCase ) fig.axes.get_yaxis().set_visible(_lowerCamelCase ) plt.show() def lowerCamelCase_( ) -> str: '''simple docstring''' _lowerCamelCase : Tuple = datetime.now() _lowerCamelCase : Tuple = current_time.strftime("%H:%M:%S" ) return timestamp
340
1
"""simple docstring""" from graphs.minimum_spanning_tree_kruskal import kruskal def lowerCamelCase_( ) -> List[str]: '''simple docstring''' _lowerCamelCase : Optional[Any] = 9 _lowerCamelCase : Tuple = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _lowerCamelCase : str = kruskal(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Optional[Any] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(_lowerCamelCase ) == sorted(_lowerCamelCase )
340
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = len(_lowerCamelCase ) _lowerCamelCase : int = ( first_str_length if first_str_length > second_str_length else second_str_length ) _lowerCamelCase : list = [] for char_count in range(_lowerCamelCase ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(_lowerCamelCase ) if __name__ == "__main__": print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
340
1
"""simple docstring""" import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A_ : def __init__( self: List[Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: List[str]=13 ,__lowerCAmelCase: List[str]=30 ,__lowerCAmelCase: List[Any]=2 ,__lowerCAmelCase: Tuple=3 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: int=True ,__lowerCAmelCase: List[str]=32 ,__lowerCAmelCase: Optional[Any]=5 ,__lowerCAmelCase: List[Any]=4 ,__lowerCAmelCase: Optional[Any]=37 ,__lowerCAmelCase: int="gelu" ,__lowerCAmelCase: Any=0.1 ,__lowerCAmelCase: Dict=0.1 ,__lowerCAmelCase: Tuple=10 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: int=None ,__lowerCAmelCase: List[Any]=2 ,): '''simple docstring''' _lowerCamelCase : List[str] = parent _lowerCamelCase : str = batch_size _lowerCamelCase : Optional[Any] = image_size _lowerCamelCase : List[Any] = patch_size _lowerCamelCase : Any = num_channels _lowerCamelCase : Union[str, Any] = is_training _lowerCamelCase : int = use_labels _lowerCamelCase : Optional[Any] = hidden_size _lowerCamelCase : Optional[int] = num_hidden_layers _lowerCamelCase : Optional[int] = num_attention_heads _lowerCamelCase : Optional[Any] = intermediate_size _lowerCamelCase : Optional[int] = hidden_act _lowerCamelCase : int = hidden_dropout_prob _lowerCamelCase : Tuple = attention_probs_dropout_prob _lowerCamelCase : Union[str, Any] = type_sequence_label_size _lowerCamelCase : Union[str, Any] = initializer_range _lowerCamelCase : int = scope _lowerCamelCase : str = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _lowerCamelCase : Dict = (image_size // patch_size) ** 2 _lowerCamelCase : Dict = num_patches + 1 def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCamelCase : List[Any] = None if self.use_labels: _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) _lowerCamelCase : Optional[int] = self.get_config() return config, pixel_values, labels def _lowercase ( self: Optional[Any] ): '''simple docstring''' return ViTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def _lowercase ( self: Tuple ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: str ): '''simple 
docstring''' _lowerCamelCase : Union[str, Any] = ViTModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Any = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Optional[Any] = ViTForMaskedImageModeling(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : List[str] = model(__lowerCAmelCase ) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _lowerCamelCase : Dict = 1 _lowerCamelCase : str = ViTForMaskedImageModeling(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCamelCase : Optional[int] = model(__lowerCAmelCase ) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def _lowercase ( self: Tuple ,__lowerCAmelCase: int ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : Optional[int] = self.type_sequence_label_size _lowerCamelCase : Dict = ViTForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : List[Any] = model(__lowerCAmelCase ,labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images _lowerCamelCase : Dict = 1 _lowerCamelCase : Dict = ViTForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : Tuple = self.prepare_config_and_inputs() ( ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ) : int = config_and_inputs _lowerCamelCase : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A_ ( _a , _a , unittest.TestCase ): lowerCAmelCase__ = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) lowerCAmelCase__ = ( {'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification} if is_torch_available() else {} ) lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = ViTModelTester(self ) _lowerCamelCase : Tuple = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 ) def _lowercase ( self: Tuple ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def _lowercase ( self: Optional[int] ): '''simple docstring''' pass def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : int = model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() 
,(nn.Module) ) _lowerCamelCase : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase ) _lowerCamelCase : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Union[str, Any] = [*signature.parameters.keys()] _lowerCamelCase : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] ,__lowerCAmelCase ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCAmelCase ) def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) @slow def _lowercase ( self: str ): '''simple docstring''' for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Union[str, Any] = ViTModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def lowerCamelCase_( ) -> Any: '''simple docstring''' _lowerCamelCase : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A_ ( unittest.TestCase ): @cached_property def _lowercase ( self: str ): '''simple docstring''' return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None @slow def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : Tuple = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__lowerCAmelCase ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : List[Any] = prepare_img() _lowerCamelCase : Dict = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): _lowerCamelCase : List[Any] = model(**__lowerCAmelCase ) # verify the logits _lowerCamelCase : Optional[Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape ,__lowerCAmelCase ) _lowerCamelCase : List[Any] = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__lowerCAmelCase ,atol=1e-4 ) ) @slow def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : Optional[Any] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" ,size=480 ) _lowerCamelCase : Optional[int] = prepare_img() _lowerCamelCase : int = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ) _lowerCamelCase : Optional[Any] = inputs.pixel_values.to(__lowerCAmelCase ) # forward pass with torch.no_grad(): _lowerCamelCase : List[Any] = model(__lowerCAmelCase ,interpolate_pos_encoding=__lowerCAmelCase ) # verify the logits _lowerCamelCase : str = torch.Size((1, 3_601, 384) ) self.assertEqual(outputs.last_hidden_state.shape ,__lowerCAmelCase ) _lowerCamelCase 
: Any = torch.tensor( [[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__lowerCAmelCase ,atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase : Optional[Any] = ViTModel.from_pretrained("facebook/dino-vits8" ,torch_dtype=torch.floataa ,device_map="auto" ) _lowerCamelCase : List[str] = self.default_image_processor _lowerCamelCase : str = prepare_img() _lowerCamelCase : int = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ) _lowerCamelCase : Tuple = inputs.pixel_values.to(__lowerCAmelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): _lowerCamelCase : Tuple = model(__lowerCAmelCase )
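# A minimal standalone sketch of the inference path the integration test above
# exercises (checkpoint and fixture names taken from the tests themselves):
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
# Map the highest logit back to a human-readable ImageNet label.
print(model.config.id2label[logits.argmax(-1).item()])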
340
"""simple docstring""" _lowerCAmelCase : Tuple = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Any = [False] * len(_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = [s] _lowerCamelCase : str = True while queue: _lowerCamelCase : Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_lowerCamelCase ) _lowerCamelCase : Any = True _lowerCamelCase : Any = u return visited[t] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : List[str] = [-1] * (len(_lowerCamelCase )) _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : List[str] = [i[:] for i in graph] # Record original cut, copy. while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Any = float("Inf" ) _lowerCamelCase : Dict = sink while s != source: # Find the minimum value in select path _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , graph[parent[s]][s] ) _lowerCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _lowerCamelCase : Optional[Any] = sink while v != source: _lowerCamelCase : Union[str, Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _lowerCamelCase : List[str] = parent[v] for i in range(len(_lowerCamelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
340
1
"""simple docstring""" from __future__ import annotations def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> list: '''simple docstring''' _lowerCamelCase : Optional[int] = [] _lowerCamelCase, _lowerCamelCase : Tuple = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) _lowerCamelCase : Dict = result + left + right return input_list def lowerCamelCase_( _lowerCamelCase ) -> list: '''simple docstring''' if len(_lowerCamelCase ) <= 1: return input_list _lowerCamelCase : Optional[Any] = list(_lowerCamelCase ) # iteration for two-way merging _lowerCamelCase : Tuple = 2 while p <= len(_lowerCamelCase ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(_lowerCamelCase ) , _lowerCamelCase ): _lowerCamelCase : Dict = i _lowerCamelCase : List[Any] = i + p - 1 _lowerCamelCase : Union[str, Any] = (low + high + 1) // 2 _lowerCamelCase : str = merge(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # final merge of last two parts if p * 2 >= len(_lowerCamelCase ): _lowerCamelCase : int = i _lowerCamelCase : int = merge(_lowerCamelCase , 0 , _lowerCamelCase , len(_lowerCamelCase ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": _lowerCAmelCase : int = input('''Enter numbers separated by a comma:\n''').strip() if user_input == "": _lowerCAmelCase : List[Any] = [] else: _lowerCAmelCase : Optional[int] = [int(item.strip()) for item in user_input.split(''',''')] print(iter_merge_sort(unsorted))
340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''', '''umberto-commoncrawl-cased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json''' ), '''umberto-wikipedia-uncased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json''' ), } class A_ ( _a ): lowerCAmelCase__ = 'camembert' def __init__( self: Tuple ,__lowerCAmelCase: Union[str, Any]=30_522 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Union[str, Any]=12 ,__lowerCAmelCase: int=12 ,__lowerCAmelCase: Optional[int]=3_072 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: int=512 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Any="absolute" ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: Dict ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[str] = vocab_size _lowerCamelCase : Any = hidden_size _lowerCamelCase : Union[str, Any] = num_hidden_layers _lowerCamelCase : str = num_attention_heads _lowerCamelCase : List[Any] = hidden_act _lowerCamelCase : int = intermediate_size _lowerCamelCase : str = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : str = type_vocab_size _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Union[str, Any] = layer_norm_eps _lowerCamelCase : Tuple = position_embedding_type _lowerCamelCase : List[Any] = use_cache _lowerCamelCase : Dict = classifier_dropout class A_ ( _a ): @property def _lowercase ( self: Any ): '''simple docstring''' if self.task == "multiple-choice": _lowerCamelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"} else: _lowerCamelCase : int = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
340
1
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : str = abs(_lowerCamelCase ) _lowerCamelCase : List[Any] = 0 while n > 0: res += n % 10 n //= 10 return res def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Optional[int] = abs(_lowerCamelCase ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' return sum(int(_lowerCamelCase ) for c in str(abs(_lowerCamelCase ) ) ) def lowerCamelCase_( ) -> None: '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(_lowerCamelCase , _lowerCamelCase ) -> None: _lowerCamelCase : Union[str, Any] = F"""{func.__name__}({value})""" _lowerCamelCase : str = timeit(F"""__main__.{call}""" , setup="import __main__" ) print(F"""{call:56} = {func(_lowerCamelCase )} -- {timing:.4f} seconds""" ) for value in (262144, 1125899906842624, 1267650600228229401496703205376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(_lowerCamelCase , _lowerCamelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
340
"""simple docstring""" from collections import defaultdict def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : str = True for v in tree[start]: if v not in visited: ret += dfs(_lowerCamelCase ) if ret % 2 == 0: cuts.append(_lowerCamelCase ) return ret def lowerCamelCase_( ) -> int: '''simple docstring''' dfs(1 ) if __name__ == "__main__": _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = 10, 9 _lowerCAmelCase : str = defaultdict(list) _lowerCAmelCase : dict[int, bool] = {} _lowerCAmelCase : list[int] = [] _lowerCAmelCase : Any = 0 _lowerCAmelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
340
1
"""simple docstring""" import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) _lowerCAmelCase : List[str] = '''bert-base-cased''' _lowerCAmelCase : Any = '''fp16''' _lowerCAmelCase : List[Any] = '''bf16''' _lowerCAmelCase : Union[str, Any] = [FPaa, BFaa] @require_fsdp @require_cuda class A_ ( _a ): def _lowercase ( self: Union[str, Any] ): '''simple docstring''' super().setUp() _lowerCamelCase : Dict = dict( ACCELERATE_USE_FSDP="true" ,MASTER_ADDR="localhost" ,MASTER_PORT="10999" ,RANK="0" ,LOCAL_RANK="0" ,WORLD_SIZE="1" ,) def _lowercase ( self: int ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(__lowerCAmelCase ): _lowerCamelCase : Optional[Any] = self.dist_env.copy() _lowerCamelCase : Any = F"""{i + 1}""" _lowerCamelCase : str = strategy with mockenv_context(**__lowerCAmelCase ): _lowerCamelCase : Tuple = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy ,ShardingStrategy(i + 1 ) ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(__lowerCAmelCase ): _lowerCamelCase : List[Any] = self.dist_env.copy() _lowerCamelCase : List[Any] = prefetch_policy with mockenv_context(**__lowerCAmelCase ): _lowerCamelCase : List[Any] = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch ,BackwardPrefetch(i + 1 ) ) def _lowercase ( self: List[str] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(__lowerCAmelCase ): _lowerCamelCase : Tuple = self.dist_env.copy() _lowerCamelCase : int = state_dict_type with mockenv_context(**__lowerCAmelCase ): _lowerCamelCase : Dict = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type ,StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : List[Any] = AutoModel.from_pretrained(__lowerCAmelCase ) for policy in FSDP_AUTO_WRAP_POLICY: _lowerCamelCase : Optional[int] = self.dist_env.copy() _lowerCamelCase : Any = policy if policy == "TRANSFORMER_BASED_WRAP": _lowerCamelCase : List[str] = "BertLayer" elif policy == "SIZE_BASED_WRAP": _lowerCamelCase : Optional[int] = "2000" with mockenv_context(**__lowerCAmelCase ): _lowerCamelCase : str = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__lowerCAmelCase ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: 
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) _lowerCamelCase : Any = self.dist_env.copy() _lowerCamelCase : List[str] = "TRANSFORMER_BASED_WRAP" _lowerCamelCase : Optional[Any] = "T5Layer" with mockenv_context(**__lowerCAmelCase ): _lowerCamelCase : Tuple = FullyShardedDataParallelPlugin() with self.assertRaises(__lowerCAmelCase ) as cm: fsdp_plugin.set_auto_wrap_policy(__lowerCAmelCase ) self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception ) ) _lowerCamelCase : Any = self.dist_env.copy() _lowerCamelCase : Union[str, Any] = "SIZE_BASED_WRAP" _lowerCamelCase : Tuple = "0" with mockenv_context(**__lowerCAmelCase ): _lowerCamelCase : Any = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__lowerCAmelCase ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def _lowercase ( self: str ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: _lowerCamelCase : List[str] = self.dist_env.copy() _lowerCamelCase : List[Any] = mp_dtype with mockenv_context(**__lowerCAmelCase ): _lowerCamelCase : Dict = Accelerator() if mp_dtype == "fp16": _lowerCamelCase : List[str] = torch.floataa elif mp_dtype == "bf16": _lowerCamelCase : Union[str, Any] = torch.bfloataa _lowerCamelCase : int = MixedPrecision(param_dtype=__lowerCAmelCase ,reduce_dtype=__lowerCAmelCase ,buffer_dtype=__lowerCAmelCase ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy ,__lowerCAmelCase ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler ,__lowerCAmelCase ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(__lowerCAmelCase ) def _lowercase ( self: List[Any] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: _lowerCamelCase : Dict = self.dist_env.copy() _lowerCamelCase : Union[str, Any] = str(__lowerCAmelCase ).lower() with mockenv_context(**__lowerCAmelCase ): _lowerCamelCase : List[str] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload ,CPUOffload(offload_params=__lowerCAmelCase ) ) @require_fsdp @require_multi_gpu @slow class A_ ( _a ): def _lowercase ( self: List[Any] ): '''simple docstring''' super().setUp() _lowerCamelCase : List[str] = 0.82 _lowerCamelCase : Union[str, Any] = [ "fsdp_shard_grad_op_transformer_based_wrap", "fsdp_full_shard_transformer_based_wrap", ] _lowerCamelCase : Optional[int] = { "multi_gpu_fp16": 3_200, "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2_000, "fsdp_full_shard_transformer_based_wrap_fp16": 1_900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } _lowerCamelCase : Tuple = 160 _lowerCamelCase : Optional[int] = 160 _lowerCamelCase : Any = inspect.getfile(accelerate.test_utils ) _lowerCamelCase : int = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps"] ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : str = os.path.join(self.test_scripts_folder ,"test_performance.py" ) _lowerCamelCase : List[Any] = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"] for config in self.performance_configs: _lowerCamelCase : int = cmd.copy() for i, strategy in enumerate(__lowerCAmelCase ): if strategy.lower() in config: cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) break if "fp32" in config: cmd_config.append("--mixed_precision=no" ) else: cmd_config.append("--mixed_precision=fp16" ) if "cpu_offload" in config: cmd_config.append("--fsdp_offload_params=True" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("--fsdp_min_num_params=2000" ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", F"""--performance_lower_bound={self.performance_lower_bound}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCAmelCase ,env=os.environ.copy() ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : Optional[Any] = os.path.join(self.test_scripts_folder ,"test_checkpointing.py" ) _lowerCamelCase : Tuple = [ "accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp", "--mixed_precision=fp16", "--fsdp_transformer_layer_cls_to_wrap=BertLayer", ] for i, strategy in enumerate(__lowerCAmelCase ): _lowerCamelCase : Tuple = cmd.copy() cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) if strategy != "FULL_SHARD": continue _lowerCamelCase : Dict = len(__lowerCAmelCase ) for state_dict_type in FSDP_STATE_DICT_TYPE: _lowerCamelCase : List[Any] = cmd_config[:state_dict_config_index] cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""" ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", "--partial_train_epoch=1", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCAmelCase ,env=os.environ.copy() ) _lowerCamelCase : int = cmd_config[:-1] _lowerCamelCase : Optional[Any] = os.path.join(self.tmpdir ,"epoch_0" ) cmd_config.extend( [ F"""--resume_from_checkpoint={resume_from_checkpoint}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCAmelCase ,env=os.environ.copy() ) def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : List[Any] = os.path.join(self.test_scripts_folder ,"test_peak_memory_usage.py" ) _lowerCamelCase : Union[str, Any] = [ "accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): _lowerCamelCase : Tuple = cmd.copy() if "fp16" in spec: cmd_config.extend(["--mixed_precision=fp16"] ) else: cmd_config.extend(["--mixed_precision=no"] ) if "multi_gpu" in spec: continue else: cmd_config.extend(["--use_fsdp"] ) for i, strategy in enumerate(__lowerCAmelCase ): if 
strategy.lower() in spec: cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) break if "cpu_offload" in spec: cmd_config.append("--fsdp_offload_params=True" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("--fsdp_min_num_params=2000" ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", F"""--peak_memory_upper_bound={peak_mem_upper_bound}""", F"""--n_train={self.n_train}""", F"""--n_val={self.n_val}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCAmelCase ,env=os.environ.copy() )
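# The launch flags the subprocess tests above assemble correspond to this
# hedged `accelerate launch` invocation (script and output paths illustrative;
# the flags themselves are taken verbatim from the commands built above):
#
#   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 \
#       --use_fsdp --mixed_precision=fp16 \
#       --fsdp_sharding_strategy=1 \
#       --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#       --fsdp_transformer_layer_cls_to_wrap=BertLayer \
#       test_performance.py --output_dir /tmp/fsdp_out --performance_lower_bound=0.82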
340
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder _lowerCAmelCase : Optional[int] = '''__DUMMY_TRANSFORMERS_USER__''' _lowerCAmelCase : Dict = '''Dummy User''' _lowerCAmelCase : Optional[int] = '''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''' _lowerCAmelCase : Tuple = '''https://hub-ci.huggingface.co''' _lowerCAmelCase : Any = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}''' _lowerCAmelCase : Tuple = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}''' _lowerCAmelCase : Dict = Path('''~/.huggingface/hub_ci_token''').expanduser() @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' monkeypatch.setattr("datasets.config.HF_ENDPOINT" , _lowerCamelCase ) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , _lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' HfFolder.save_token(_lowerCamelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="session" ) def lowerCamelCase_( ) -> str: '''simple docstring''' return HfApi(endpoint=_lowerCamelCase ) @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Dict = HfFolder.get_token() HfFolder.save_token(_lowerCamelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_lowerCamelCase ) @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' def _cleanup_repo(_lowerCamelCase ): hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) return _cleanup_repo @pytest.fixture def lowerCamelCase_( _lowerCamelCase ) -> List[str]: '''simple docstring''' @contextmanager def _temporary_repo(_lowerCamelCase ): try: yield repo_id finally: cleanup_repo(_lowerCamelCase ) return _temporary_repo @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Tuple = F"""repo_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[str] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : Dict = 
F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Any = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}""" _lowerCamelCase : List[Any] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase ) hf_api.upload_file( token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , ) yield repo_id try: hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
340
1
"""simple docstring""" from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) _lowerCAmelCase : Optional[int] = ''' transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. ''' class A_ ( _a ): @staticmethod def _lowercase ( __lowerCAmelCase: ArgumentParser ): '''simple docstring''' _lowerCamelCase : List[str] = parser.add_parser( "convert" ,help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." ,) train_parser.add_argument("--model_type" ,type=__lowerCAmelCase ,required=__lowerCAmelCase ,help="Model's type." ) train_parser.add_argument( "--tf_checkpoint" ,type=__lowerCAmelCase ,required=__lowerCAmelCase ,help="TensorFlow checkpoint path or folder." ) train_parser.add_argument( "--pytorch_dump_output" ,type=__lowerCAmelCase ,required=__lowerCAmelCase ,help="Path to the PyTorch saved model output." ) train_parser.add_argument("--config" ,type=__lowerCAmelCase ,default="" ,help="Configuration file path or folder." ) train_parser.add_argument( "--finetuning_task_name" ,type=__lowerCAmelCase ,default=__lowerCAmelCase ,help="Optional fine-tuning task name if the TF model was a finetuned model." ,) train_parser.set_defaults(func=__lowerCAmelCase ) def __init__( self: List[Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,*__lowerCAmelCase: int ,): '''simple docstring''' _lowerCamelCase : str = logging.get_logger("transformers-cli/converting" ) self._logger.info(F"""Loading model {model_type}""" ) _lowerCamelCase : Union[str, Any] = model_type _lowerCamelCase : Union[str, Any] = tf_checkpoint _lowerCamelCase : str = pytorch_dump_output _lowerCamelCase : Optional[Any] = config _lowerCamelCase : Tuple = finetuning_task_name def _lowercase ( self: List[Any] ): '''simple docstring''' if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__lowerCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__lowerCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__lowerCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(__lowerCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "gpt": from 
..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__lowerCAmelCase ) if "ckpt" in self._tf_checkpoint.lower(): _lowerCamelCase : Optional[int] = self._tf_checkpoint _lowerCamelCase : Tuple = "" else: _lowerCamelCase : Union[str, Any] = self._tf_checkpoint _lowerCamelCase : Dict = "" convert_transfo_xl_checkpoint_to_pytorch( __lowerCAmelCase ,self._config ,self._pytorch_dump_output ,__lowerCAmelCase ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__lowerCAmelCase ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__lowerCAmelCase ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint ,self._config ,self._pytorch_dump_output ,self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) else: raise ValueError( "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
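# Example invocation of the command registered above (paths illustrative):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model/model.ckpt \
#       --config ./bert_model/bert_config.json \
#       --pytorch_dump_output ./bert_model/pytorch_model.bin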
340
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) class A_ ( _a ): def __init__( self: List[Any] ,__lowerCAmelCase: Union[List[ControlNetModel], Tuple[ControlNetModel]] ): '''simple docstring''' super().__init__() _lowerCamelCase : Tuple = nn.ModuleList(__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Union[torch.Tensor, float, int] ,__lowerCAmelCase: torch.Tensor ,__lowerCAmelCase: List[torch.tensor] ,__lowerCAmelCase: List[float] ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[torch.Tensor] = None ,__lowerCAmelCase: Optional[Dict[str, Any]] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,): '''simple docstring''' for i, (image, scale, controlnet) in enumerate(zip(__lowerCAmelCase ,__lowerCAmelCase ,self.nets ) ): _lowerCamelCase, _lowerCamelCase : Union[str, Any] = controlnet( __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,) # merge samples if i == 0: _lowerCamelCase, _lowerCamelCase : Optional[Any] = down_samples, mid_sample else: _lowerCamelCase : Optional[int] = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__lowerCAmelCase ,__lowerCAmelCase ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Union[str, os.PathLike] ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Callable = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[str] = None ,): '''simple docstring''' _lowerCamelCase : List[Any] = 0 _lowerCamelCase : str = save_directory for controlnet in self.nets: controlnet.save_pretrained( __lowerCAmelCase ,is_main_process=__lowerCAmelCase ,save_function=__lowerCAmelCase ,safe_serialization=__lowerCAmelCase ,variant=__lowerCAmelCase ,) idx += 1 _lowerCamelCase : int = model_path_to_save + F"""_{idx}""" @classmethod def _lowercase ( cls: Any ,__lowerCAmelCase: Optional[Union[str, os.PathLike]] ,**__lowerCAmelCase: int ): '''simple docstring''' _lowerCamelCase : int = 0 _lowerCamelCase : str = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... _lowerCamelCase : Dict = pretrained_model_path while os.path.isdir(__lowerCAmelCase ): _lowerCamelCase : List[Any] = ControlNetModel.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase ) controlnets.append(__lowerCAmelCase ) idx += 1 _lowerCamelCase : Tuple = pretrained_model_path + F"""_{idx}""" logger.info(F"""{len(__lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.""" ) if len(__lowerCAmelCase ) == 0: raise ValueError( F"""No ControlNets found under {os.path.dirname(__lowerCAmelCase )}. Expected at least {pretrained_model_path + '_0'}.""" ) return cls(__lowerCAmelCase )
340
1
"""simple docstring""" import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) _lowerCAmelCase : Optional[Any] = logging.getLogger() def lowerCamelCase_( _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : str = {} _lowerCamelCase : Tuple = os.path.join(_lowerCamelCase , "all_results.json" ) if os.path.exists(_lowerCamelCase ): with open(_lowerCamelCase , "r" ) as f: _lowerCamelCase : Any = json.load(_lowerCamelCase ) else: raise ValueError(F"""can't find {path}""" ) return results _lowerCAmelCase : Optional[Any] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class A_ ( _a ): def _lowercase ( self: Optional[int] ): '''simple docstring''' import xla_spawn _lowerCamelCase : Optional[int] = self.get_auto_remove_tmp_dir() _lowerCamelCase : List[str] = F""" ./examples/pytorch/text-classification/run_glue.py --num_cores=8 ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --debug tpu_metrics_debug --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(__lowerCAmelCase ,"argv" ,__lowerCAmelCase ): _lowerCamelCase : Union[str, Any] = time() xla_spawn.main() _lowerCamelCase : List[str] = time() _lowerCamelCase : int = get_results(__lowerCAmelCase ) self.assertGreaterEqual(result["eval_accuracy"] ,0.75 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start ,500 ) def _lowercase ( self: Dict ): '''simple docstring''' import xla_spawn _lowerCamelCase : Optional[Any] = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split() with patch.object(__lowerCAmelCase ,"argv" ,__lowerCAmelCase ): xla_spawn.main()
340
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : int = logging.get_logger(__name__) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]: '''simple docstring''' _lowerCamelCase : Tuple = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("head" ): _lowerCamelCase : Tuple = "segformer.encoder." + key if key.startswith("backbone" ): _lowerCamelCase : Any = key.replace("backbone" , "segformer.encoder" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _lowerCamelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )] _lowerCamelCase : int = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_lowerCamelCase )-1}""" ) if "norm" in key: _lowerCamelCase : Optional[Any] = key.replace("norm" , "layer_norm" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _lowerCamelCase : Dict = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )] _lowerCamelCase : Tuple = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_lowerCamelCase )-1}""" ) if "layer_norm1" in key: _lowerCamelCase : Union[str, Any] = key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: _lowerCamelCase : int = key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 _lowerCamelCase : Union[str, Any] = key[key.find("block" ) + len("block" )] _lowerCamelCase : Optional[Any] = key.replace(F"""block{idx}""" , F"""block.{int(_lowerCamelCase )-1}""" ) if "attn.q" in key: _lowerCamelCase : Optional[int] = key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: _lowerCamelCase : List[str] = key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: _lowerCamelCase : Tuple = key.replace("attn" , "attention.self" ) if "fc1" in key: _lowerCamelCase : Optional[Any] = key.replace("fc1" , "dense1" ) if "fc2" in key: _lowerCamelCase : Dict = key.replace("fc2" , "dense2" ) if "linear_pred" in key: _lowerCamelCase : int = key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: _lowerCamelCase : str = key.replace("linear_fuse.conv" , "linear_fuse" ) _lowerCamelCase : Optional[Any] = key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _lowerCamelCase : Union[str, Any] = key[key.find("linear_c" ) + len("linear_c" )] _lowerCamelCase : Optional[int] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_lowerCamelCase )-1}""" ) if key.startswith("head" ): _lowerCamelCase : List[str] = key.replace("head" , "classifier" ) _lowerCamelCase : Union[str, Any] = value return new_state_dict def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" ) _lowerCamelCase : Optional[Any] = 
state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict _lowerCamelCase : int = kv_weight[ : config.hidden_sizes[i], : ] _lowerCamelCase : int = kv_bias[: config.hidden_sizes[i]] _lowerCamelCase : Optional[int] = kv_weight[ config.hidden_sizes[i] :, : ] _lowerCamelCase : Optional[Any] = kv_bias[ config.hidden_sizes[i] : ] def lowerCamelCase_( ) -> Dict: '''simple docstring''' _lowerCamelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCamelCase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return image @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : Any = SegformerConfig() _lowerCamelCase : int = False # set attributes based on model_name _lowerCamelCase : Any = "huggingface/label-files" if "segformer" in model_name: _lowerCamelCase : str = model_name[len("segformer." ) : len("segformer." ) + 2] if "ade" in model_name: _lowerCamelCase : str = 150 _lowerCamelCase : Dict = "ade20k-id2label.json" _lowerCamelCase : Dict = (1, 150, 128, 128) elif "city" in model_name: _lowerCamelCase : List[str] = 19 _lowerCamelCase : Tuple = "cityscapes-id2label.json" _lowerCamelCase : Tuple = (1, 19, 128, 128) else: raise ValueError(F"""Model {model_name} not supported""" ) elif "mit" in model_name: _lowerCamelCase : List[str] = True _lowerCamelCase : Tuple = model_name[4:6] _lowerCamelCase : Tuple = 1000 _lowerCamelCase : List[Any] = "imagenet-1k-id2label.json" _lowerCamelCase : List[Any] = (1, 1000) else: raise ValueError(F"""Model {model_name} not supported""" ) # set config attributes _lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowerCamelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase : Optional[Any] = idalabel _lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : int = 256 elif size == "b2": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : List[Any] = 768 _lowerCamelCase : Any = [3, 4, 6, 3] elif size == "b3": _lowerCamelCase : Tuple = [64, 128, 320, 512] _lowerCamelCase : Union[str, Any] = 768 _lowerCamelCase : Optional[Any] = [3, 4, 18, 3] elif size == "b4": _lowerCamelCase : str = [64, 128, 320, 512] _lowerCamelCase : Optional[Any] = 768 _lowerCamelCase : Dict = [3, 8, 27, 3] elif size == "b5": _lowerCamelCase : int = [64, 128, 320, 512] _lowerCamelCase : Tuple = 768 _lowerCamelCase : Tuple = [3, 6, 40, 3] else: raise ValueError(F"""Size {size} not supported""" ) # load image processor (only resize + normalize) _lowerCamelCase : Dict = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase ) # prepare image _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values logger.info(F"""Converting model {model_name}...""" ) # load original state dict if encoder_only: _lowerCamelCase : Tuple = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) ) else: _lowerCamelCase : int = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )["state_dict"] # rename keys _lowerCamelCase : str = rename_keys(_lowerCamelCase , 
encoder_only=_lowerCamelCase ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(_lowerCamelCase , _lowerCamelCase ) # create HuggingFace model and load state dict if encoder_only: _lowerCamelCase : Tuple = False _lowerCamelCase : Optional[int] = SegformerForImageClassification(_lowerCamelCase ) else: _lowerCamelCase : List[str] = SegformerForSemanticSegmentation(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) model.eval() # forward pass _lowerCamelCase : Any = model(_lowerCamelCase ) _lowerCamelCase : Dict = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": _lowerCamelCase : str = torch.tensor( [ [[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]], [[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]], [[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]], [[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]], [[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": _lowerCamelCase : int = torch.tensor( [ [[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]], [[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]], [[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": _lowerCamelCase : Optional[Any] = torch.tensor( [ [[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]], [[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]], [[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]], [[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]], [[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": _lowerCamelCase : Any = torch.tensor( [ [[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]], [[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]], 
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": _lowerCamelCase : Dict = torch.tensor( [ [[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]], [[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]], [[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": _lowerCamelCase : Optional[int] = torch.tensor( [ [[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]], [[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]], [[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [ [-1.13_72e01, -1.27_87e01, -1.34_77e01], [-1.25_36e01, -1.41_94e01, -1.44_09e01], [-1.32_17e01, -1.48_88e01, -1.53_27e01], ], [ [-1.47_91e01, -1.71_22e01, -1.82_77e01], [-1.71_63e01, -1.91_92e01, -1.95_33e01], [-1.78_97e01, -1.99_91e01, -2.03_15e01], ], [ [7.67_23e-01, 4.19_21e-01, -7.78_78e-02], [4.77_72e-01, 9.55_57e-03, -2.80_82e-01], [3.60_32e-01, -2.48_26e-01, -5.11_68e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": _lowerCamelCase : Union[str, Any] = torch.tensor( [ [[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]], [[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]], [[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": _lowerCamelCase : List[Any] = torch.tensor( [ [[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]], [[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]], [[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": _lowerCamelCase : Tuple = torch.tensor( [ [[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]], [[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]], [[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": _lowerCamelCase : Any = torch.tensor( [ [[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]], [[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]], [[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]], ] ) 
elif model_name == "segformer.b4.1024x1024.city.160k": _lowerCamelCase : List[str] = torch.tensor( [ [[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]], [[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]], [[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": _lowerCamelCase : str = torch.tensor( [ [[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]], [[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]], [[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]], ] ) else: _lowerCamelCase : Dict = logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-2 ) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : str = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''segformer.b0.512x512.ade.160k''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _lowerCAmelCase : str = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
340
1
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> None: '''simple docstring''' _lowerCamelCase : Any = len(_lowerCamelCase ) print("The following activities are selected:" ) # The first activity is always selected _lowerCamelCase : Union[str, Any] = 0 print(_lowerCamelCase , end="," ) # Consider rest of the activities for j in range(_lowerCamelCase ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(_lowerCamelCase , end="," ) _lowerCamelCase : Dict = j if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase : Tuple = [1, 3, 0, 5, 8, 5] _lowerCAmelCase : Union[str, Any] = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
340
"""simple docstring""" _lowerCAmelCase : dict[tuple[int, int, int], int] = {} def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on _lowerCamelCase : Optional[int] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one _lowerCamelCase : int = _calculate(days - 1 , _lowerCamelCase , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 _lowerCamelCase : Tuple = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter _lowerCamelCase : str = _calculate(days - 1 , _lowerCamelCase , 0 ) _lowerCamelCase : List[Any] = state_late + state_absent + state_ontime _lowerCamelCase : int = prizestrings return prizestrings def lowerCamelCase_( _lowerCamelCase = 30 ) -> int: '''simple docstring''' return _calculate(_lowerCamelCase , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
340
1
"""simple docstring""" import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def lowerCamelCase_( _lowerCamelCase="" ) -> str: '''simple docstring''' _lowerCamelCase : List[Any] = tempfile.mkdtemp() return os.path.join(_lowerCamelCase , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class A_ ( unittest.TestCase ): def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : List[str] = torch.rand(12 ,dtype=torch.floataa ) - 0.5 _lowerCamelCase : Optional[int] = AgentAudio(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(__lowerCAmelCase ,agent_type.to_raw() ,atol=1e-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(__lowerCAmelCase ) ) # Ensure that the file contains the same value as the original tensor _lowerCamelCase, _lowerCamelCase : int = sf.read(__lowerCAmelCase ) self.assertTrue(torch.allclose(__lowerCAmelCase ,torch.tensor(__lowerCAmelCase ) ,atol=1e-4 ) ) def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : int = torch.rand(12 ,dtype=torch.floataa ) - 0.5 _lowerCamelCase : Union[str, Any] = get_new_path(suffix=".wav" ) sf.write(__lowerCAmelCase ,__lowerCAmelCase ,16_000 ) _lowerCamelCase : int = AgentAudio(__lowerCAmelCase ) self.assertTrue(torch.allclose(__lowerCAmelCase ,agent_type.to_raw() ,atol=1e-4 ) ) self.assertEqual(agent_type.to_string() ,__lowerCAmelCase ) @require_vision @require_torch class A_ ( unittest.TestCase ): def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : str = torch.randint(0 ,256 ,(64, 64, 3) ) _lowerCamelCase : List[Any] = AgentImage(__lowerCAmelCase ) _lowerCamelCase : Tuple = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(__lowerCAmelCase ,agent_type._tensor ,atol=1e-4 ) ) self.assertIsInstance(agent_type.to_raw() ,Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(__lowerCAmelCase ) ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : Any = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" _lowerCamelCase : str = Image.open(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = AgentImage(__lowerCAmelCase ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(__lowerCAmelCase ) ) def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : List[Any] = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" _lowerCamelCase : Tuple = Image.open(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = AgentImage(__lowerCAmelCase ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del 
agent_type self.assertTrue(os.path.exists(__lowerCAmelCase ) ) class A_ ( unittest.TestCase ): def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : Any = "Hey!" _lowerCamelCase : Optional[int] = AgentText(__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase ,agent_type.to_string() ) self.assertEqual(__lowerCAmelCase ,agent_type.to_raw() ) self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
340
"""simple docstring""" from __future__ import annotations def lowerCamelCase_( _lowerCamelCase ) -> bool: '''simple docstring''' _lowerCamelCase : int = str(_lowerCamelCase ) return len(_lowerCamelCase ) == 9 and set(_lowerCamelCase ) == set("123456789" ) def lowerCamelCase_( ) -> int | None: '''simple docstring''' for base_num in range(9999 , 4999 , -1 ): _lowerCamelCase : Union[str, Any] = 100002 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate for base_num in range(333 , 99 , -1 ): _lowerCamelCase : Tuple = 1002003 * base_num if is_9_pandigital(_lowerCamelCase ): return candidate return None if __name__ == "__main__": print(f'''{solution() = }''')
340
1
"""simple docstring""" import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging _lowerCAmelCase : str = logging.get_logger(__name__) class A_ ( _a ): lowerCAmelCase__ = CLIPConfig lowerCAmelCase__ = ['CLIPEncoderLayer'] def __init__( self: Tuple ,__lowerCAmelCase: CLIPConfig ): '''simple docstring''' super().__init__(__lowerCAmelCase ) _lowerCamelCase : int = CLIPVisionModelWithProjection(config.vision_config ) _lowerCamelCase : int = nn.Linear(config.vision_config.projection_dim ,1 ) _lowerCamelCase : str = nn.Linear(config.vision_config.projection_dim ,1 ) @torch.no_grad() def _lowercase ( self: str ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Union[str, Any]=0.5 ,__lowerCAmelCase: Optional[Any]=0.5 ): '''simple docstring''' _lowerCamelCase : int = self.vision_model(__lowerCAmelCase )[0] _lowerCamelCase : List[Any] = self.p_head(__lowerCAmelCase ) _lowerCamelCase : Any = nsfw_detected.flatten() _lowerCamelCase : Optional[Any] = nsfw_detected > p_threshold _lowerCamelCase : List[str] = nsfw_detected.tolist() if any(__lowerCAmelCase ): logger.warning( "Potential NSFW content was detected in one or more images. A black image will be returned instead." " Try again with a different prompt and/or seed." ) for idx, nsfw_detected_ in enumerate(__lowerCAmelCase ): if nsfw_detected_: _lowerCamelCase : str = np.zeros(images[idx].shape ) _lowerCamelCase : Tuple = self.w_head(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = watermark_detected.flatten() _lowerCamelCase : int = watermark_detected > w_threshold _lowerCamelCase : List[Any] = watermark_detected.tolist() if any(__lowerCAmelCase ): logger.warning( "Potential watermarked content was detected in one or more images. A black image will be returned instead." " Try again with a different prompt and/or seed." ) for idx, watermark_detected_ in enumerate(__lowerCAmelCase ): if watermark_detected_: _lowerCamelCase : Tuple = np.zeros(images[idx].shape ) return images, nsfw_detected, watermark_detected
340
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class A_ ( _a ): lowerCAmelCase__ = 'char' lowerCAmelCase__ = 'bpe' lowerCAmelCase__ = 'wp' _lowerCAmelCase : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class A_ ( _a ): lowerCAmelCase__ = ['image_processor', 'char_tokenizer'] lowerCAmelCase__ = 'ViTImageProcessor' lowerCAmelCase__ = 'MgpstrTokenizer' def __init__( self: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Optional[int]=None ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Any = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." ,__lowerCAmelCase ,) _lowerCamelCase : List[Any] = kwargs.pop("feature_extractor" ) _lowerCamelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) _lowerCamelCase : List[str] = tokenizer _lowerCamelCase : str = AutoTokenizer.from_pretrained("gpt2" ) _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(__lowerCAmelCase ,__lowerCAmelCase ) def __call__( self: Optional[int] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Optional[Any]=None ,**__lowerCAmelCase: Tuple ): '''simple docstring''' if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." 
) if images is not None: _lowerCamelCase : Optional[int] = self.image_processor(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is not None: _lowerCamelCase : int = self.char_tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase ) if text is None: return inputs elif images is None: return encodings else: _lowerCamelCase : Tuple = encodings["input_ids"] return inputs def _lowercase ( self: int ,__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = sequences _lowerCamelCase : Dict = char_preds.size(0 ) _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._decode_helper(__lowerCAmelCase ,"char" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = self._decode_helper(__lowerCAmelCase ,"bpe" ) _lowerCamelCase, _lowerCamelCase : Tuple = self._decode_helper(__lowerCAmelCase ,"wp" ) _lowerCamelCase : List[str] = [] _lowerCamelCase : str = [] for i in range(__lowerCAmelCase ): _lowerCamelCase : str = [char_scores[i], bpe_scores[i], wp_scores[i]] _lowerCamelCase : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] _lowerCamelCase : Optional[Any] = scores.index(max(__lowerCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _lowerCamelCase : Tuple = {} _lowerCamelCase : Tuple = final_strs _lowerCamelCase : int = final_scores _lowerCamelCase : str = char_strs _lowerCamelCase : Dict = bpe_strs _lowerCamelCase : int = wp_strs return out def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ): '''simple docstring''' if format == DecodeType.CHARACTER: _lowerCamelCase : int = self.char_decode _lowerCamelCase : List[str] = 1 _lowerCamelCase : Optional[int] = "[s]" elif format == DecodeType.BPE: _lowerCamelCase : Dict = self.bpe_decode _lowerCamelCase : str = 2 _lowerCamelCase : Union[str, Any] = "#" elif format == DecodeType.WORDPIECE: _lowerCamelCase : int = self.wp_decode _lowerCamelCase : List[str] = 102 _lowerCamelCase : List[Any] = "[SEP]" else: raise ValueError(F"""Format {format} is not supported.""" ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = [], [] _lowerCamelCase : Any = pred_logits.size(0 ) _lowerCamelCase : int = pred_logits.size(1 ) _lowerCamelCase, _lowerCamelCase : List[Any] = pred_logits.topk(1 ,dim=-1 ,largest=__lowerCAmelCase ,sorted=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_index.view(-1 ,__lowerCAmelCase )[:, 1:] _lowerCamelCase : List[str] = decoder(__lowerCAmelCase ) _lowerCamelCase, _lowerCamelCase : str = torch.nn.functional.softmax(__lowerCAmelCase ,dim=2 ).max(dim=2 ) _lowerCamelCase : Any = preds_max_prob[:, 1:] for index in range(__lowerCAmelCase ): _lowerCamelCase : List[Any] = preds_str[index].find(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = preds_str[index][:pred_eos] _lowerCamelCase : Optional[Any] = preds_index[index].cpu().tolist() _lowerCamelCase : List[str] = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1 _lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1] _lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__lowerCAmelCase ) conf_scores.append(__lowerCAmelCase ) return dec_strs, conf_scores def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : str = [seq.replace(" " ,"" ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs def _lowercase ( 
self: List[str] ,__lowerCAmelCase: List[str] ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(__lowerCAmelCase ) def _lowercase ( self: Tuple ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = [seq.replace(" " ,"" ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )] return decode_strs
340
1
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : Union[str, Any] = 384 _lowerCamelCase : List[str] = 7 if "tiny" in model_name: _lowerCamelCase : Optional[Any] = 96 _lowerCamelCase : int = (2, 2, 6, 2) _lowerCamelCase : Dict = (3, 6, 12, 24) elif "small" in model_name: _lowerCamelCase : List[Any] = 96 _lowerCamelCase : int = (2, 2, 18, 2) _lowerCamelCase : Tuple = (3, 6, 12, 24) elif "base" in model_name: _lowerCamelCase : List[Any] = 128 _lowerCamelCase : Optional[int] = (2, 2, 18, 2) _lowerCamelCase : str = (4, 8, 16, 32) _lowerCamelCase : Union[str, Any] = 12 _lowerCamelCase : Union[str, Any] = 512 elif "large" in model_name: _lowerCamelCase : List[Any] = 192 _lowerCamelCase : List[Any] = (2, 2, 18, 2) _lowerCamelCase : Tuple = (6, 12, 24, 48) _lowerCamelCase : Union[str, Any] = 12 _lowerCamelCase : Dict = 768 # set label information _lowerCamelCase : int = 150 _lowerCamelCase : List[Any] = "huggingface/label-files" _lowerCamelCase : Tuple = "ade20k-id2label.json" _lowerCamelCase : List[str] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowerCamelCase : str = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase : Tuple = {v: k for k, v in idalabel.items()} _lowerCamelCase : Optional[Any] = SwinConfig( embed_dim=_lowerCamelCase , depths=_lowerCamelCase , num_heads=_lowerCamelCase , window_size=_lowerCamelCase , out_features=["stage1", "stage2", "stage3", "stage4"] , ) _lowerCamelCase : List[Any] = UperNetConfig( backbone_config=_lowerCamelCase , auxiliary_in_channels=_lowerCamelCase , num_labels=_lowerCamelCase , idalabel=_lowerCamelCase , labelaid=_lowerCamelCase , ) return config def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Optional[Any] = [] # fmt: off # stem rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") ) rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") ) rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", 
F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") ) rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") ) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any: '''simple docstring''' _lowerCamelCase : Any = dct.pop(_lowerCamelCase ) _lowerCamelCase : List[str] = val def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _lowerCamelCase : Optional[Any] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _lowerCamelCase : List[Any] = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" ) _lowerCamelCase : List[str] = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict _lowerCamelCase : Any = in_proj_weight[:dim, :] _lowerCamelCase : str = in_proj_bias[: dim] _lowerCamelCase : Tuple = in_proj_weight[ dim : dim * 2, : ] _lowerCamelCase : Optional[int] = in_proj_bias[ dim : dim * 2 ] _lowerCamelCase : List[Any] = in_proj_weight[ -dim :, : ] _lowerCamelCase : List[Any] = in_proj_bias[-dim :] # fmt: on def lowerCamelCase_( _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase, _lowerCamelCase : int = x.shape _lowerCamelCase : Optional[int] = x.reshape(_lowerCamelCase , 4 , in_channel // 4 ) _lowerCamelCase : Optional[Any] = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(_lowerCamelCase , _lowerCamelCase ) return x def lowerCamelCase_( 
_lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase, _lowerCamelCase : Dict = x.shape _lowerCamelCase : int = x.reshape(_lowerCamelCase , in_channel // 4 , 4 ) _lowerCamelCase : Tuple = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(_lowerCamelCase , _lowerCamelCase ) return x def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : Optional[int] = x.shape[0] _lowerCamelCase : List[str] = x.reshape(4 , in_channel // 4 ) _lowerCamelCase : List[str] = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(_lowerCamelCase ) return x def lowerCamelCase_( _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Dict = x.shape[0] _lowerCamelCase : int = x.reshape(in_channel // 4 , 4 ) _lowerCamelCase : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(_lowerCamelCase ) return x def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : List[str] = { "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth", "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth", "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth", "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth", } _lowerCamelCase : Tuple = model_name_to_url[model_name] _lowerCamelCase : int = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" , file_name=_lowerCamelCase )[ "state_dict" ] for name, param in state_dict.items(): print(_lowerCamelCase , param.shape ) _lowerCamelCase : Optional[Any] = get_upernet_config(_lowerCamelCase ) _lowerCamelCase : List[str] = UperNetForSemanticSegmentation(_lowerCamelCase ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): _lowerCamelCase : str = state_dict.pop(_lowerCamelCase ) if "bn" in key: _lowerCamelCase : Any = key.replace("bn" , "batch_norm" ) _lowerCamelCase : Optional[Any] = val # rename keys _lowerCamelCase : Dict = create_rename_keys(_lowerCamelCase ) for src, dest in rename_keys: rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) read_in_q_k_v(_lowerCamelCase , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: _lowerCamelCase : Tuple = reverse_correct_unfold_reduction_order(_lowerCamelCase ) if "norm" in key: _lowerCamelCase : Tuple = reverse_correct_unfold_norm_order(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) # verify on image _lowerCamelCase : Tuple = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" _lowerCamelCase : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("RGB" ) _lowerCamelCase : Optional[int] 
= SegformerImageProcessor() _lowerCamelCase : Union[str, Any] = processor(_lowerCamelCase , return_tensors="pt" ).pixel_values with torch.no_grad(): _lowerCamelCase : int = model(_lowerCamelCase ) _lowerCamelCase : str = outputs.logits print(logits.shape ) print("First values of logits:" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": _lowerCamelCase : List[Any] = torch.tensor( [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ) elif model_name == "upernet-swin-small": _lowerCamelCase : Any = torch.tensor( [[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] ) elif model_name == "upernet-swin-base": _lowerCamelCase : Any = torch.tensor( [[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] ) elif model_name == "upernet-swin-large": _lowerCamelCase : str = torch.tensor( [[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] ) print("Logits:" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowerCamelCase ) print(F"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(_lowerCamelCase ) if push_to_hub: print(F"""Pushing model and processor for {model_name} to hub""" ) model.push_to_hub(F"""openmmlab/{model_name}""" ) processor.push_to_hub(F"""openmmlab/{model_name}""" ) if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-swin-tiny''', type=str, choices=[f'''upernet-swin-{size}''' for size in ['''tiny''', '''small''', '''base''', '''large''']], help='''Name of the Swin + UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _lowerCAmelCase : List[Any] = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
340
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version _lowerCAmelCase : List[Any] = get_logger(__name__) class A_ : lowerCAmelCase__ = 'dummy_data' lowerCAmelCase__ = 'datasets' lowerCAmelCase__ = False def __init__( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[Version, str] ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[List[Callable]] = None ,): '''simple docstring''' _lowerCamelCase : str = 0 _lowerCamelCase : List[str] = dataset_name _lowerCamelCase : Optional[int] = cache_dir _lowerCamelCase : Optional[int] = use_local_dummy_data _lowerCamelCase : int = config # download_callbacks take a single url as input _lowerCamelCase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _lowerCamelCase : int = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _lowerCamelCase : Tuple = str(__lowerCAmelCase ) # to be downloaded _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Dict = None @property def _lowercase ( self: str ): '''simple docstring''' if self._dummy_file is None: _lowerCamelCase : List[str] = self.download_dummy_data() return self._dummy_file @property def _lowercase ( self: str ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join("dummy" ,self.version_name ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' return os.path.join(self.dummy_data_folder ,"dummy_data.zip" ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Dict = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _lowerCamelCase : Optional[int] = cached_path( __lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=__lowerCAmelCase ,force_extract=__lowerCAmelCase ) return os.path.join(__lowerCAmelCase ,self.dummy_file_name ) @property def _lowercase ( self: Tuple ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def _lowercase ( self: List[str] ): '''simple docstring''' if self._bucket_url is None: _lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) ) return self._bucket_url @property def _lowercase ( self: Union[str, Any] ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _lowerCamelCase : Tuple = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _lowerCamelCase : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase ) elif isinstance(__lowerCAmelCase ,(list, tuple) ): return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase ) else: return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' return path def _lowercase ( self: Optional[int] ): '''simple docstring''' return {} def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : str = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): for single_url in single_urls: download_callback(__lowerCAmelCase ) else: _lowerCamelCase : Union[str, Any] = single_urls download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) for x in single_urls] else: _lowerCamelCase : Union[str, Any] = single_urls _lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) _lowerCamelCase : List[Any] = value # make sure that values are unique if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Dict = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _lowerCamelCase : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,__lowerCAmelCase ) ) for url in data_url ) _lowerCamelCase : Optional[Any] = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _lowerCamelCase : Tuple = [data_url[0]] * len(__lowerCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the 
url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__lowerCAmelCase ) return dummy_data_list def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__lowerCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _lowercase ( self: Optional[Any] ): '''simple docstring''' pass def _lowercase ( self: Optional[int] ): '''simple docstring''' pass def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' def _iter_archive_members(__lowerCAmelCase: Any ): # this preserves the order of the members inside the ZIP archive _lowerCamelCase : Tuple = Path(self.dummy_file ).parent _lowerCamelCase : str = path.relative_to(__lowerCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _lowerCamelCase : Optional[int] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = Path(__lowerCAmelCase ) _lowerCamelCase : int = _iter_archive_members(__lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__lowerCAmelCase ).as_posix(), file_path.open("rb" ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : List[Any] = [paths] for path in paths: if os.path.isfile(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__lowerCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
340
1
"""simple docstring""" # flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Dict[Optional[str], Type[Formatter]] = {} _lowerCAmelCase : Dict[Optional[str], str] = {} _lowerCAmelCase : Dict[Optional[str], Exception] = {} def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , ) -> Any: '''simple docstring''' _lowerCamelCase : str = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( F"""Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})""" ) _lowerCamelCase : Optional[int] = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( F"""Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})""" ) _lowerCamelCase : int = format_type def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : Union[str, Any] = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): _lowerCamelCase : Any = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['''python''']) _register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow''']) _register_formatter(NumpyFormatter, '''numpy''', aliases=['''np''']) _register_formatter(PandasFormatter, '''pandas''', aliases=['''pd''']) _register_formatter(CustomFormatter, '''custom''') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch''']) else: _lowerCAmelCase : str = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''') _register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch''']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf''']) else: _lowerCAmelCase : Optional[int] = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''') _register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf''']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, '''jax''', aliases=[]) else: _lowerCAmelCase : Any = ValueError('''JAX needs to be installed to be able to return JAX arrays.''') _register_unavailable_formatter(_jax_error, '''jax''', aliases=[]) def lowerCamelCase_( _lowerCamelCase ) -> Optional[str]: '''simple docstring''' if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def lowerCamelCase_( _lowerCamelCase , **_lowerCamelCase ) -> Formatter: '''simple docstring''' _lowerCamelCase : str = get_format_type_from_alias(_lowerCamelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**_lowerCamelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( F"""Return type should be None or selected in {list(type for type 
in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'""" )
340
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise TypeError("Undefined for non-integers" ) elif precision < 1: raise ValueError("Undefined for non-natural numbers" ) _lowerCamelCase : int = precision _lowerCamelCase : Dict = ceil(precision / 14 ) _lowerCamelCase : Optional[Any] = 426880 * Decimal(10005 ).sqrt() _lowerCamelCase : int = 1 _lowerCamelCase : Optional[int] = 13591409 _lowerCamelCase : int = Decimal(_lowerCamelCase ) for k in range(1 , _lowerCamelCase ): _lowerCamelCase : Union[str, Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowerCamelCase ) ** 3) linear_term += 545140134 exponential_term *= -262537412640768000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = 50 print(f'''The first {n} digits of pi is: {pi(n)}''')
340
1
"""simple docstring""" import datasets _lowerCAmelCase : List[Any] = '''\ @InProceedings{conneau2018xnli, author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin", title = "XNLI: Evaluating Cross-lingual Sentence Representations", booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", year = "2018", publisher = "Association for Computational Linguistics", location = "Brussels, Belgium", } ''' _lowerCAmelCase : List[str] = '''\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). ''' _lowerCAmelCase : Dict = ''' Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: \'accuracy\': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric("xnli") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} ''' def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ ( datasets.Metric ): def _lowercase ( self: Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) ,codebase_urls=[] ,reference_urls=[] ,format="numpy" ,) def _lowercase ( self: Any ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Union[str, Any] ): '''simple docstring''' return {"accuracy": simple_accuracy(__lowerCAmelCase ,__lowerCAmelCase )}
340
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class A_ ( _a ): lowerCAmelCase__ = 42 lowerCAmelCase__ = None def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=0.9_9_9 , _lowerCamelCase="cosine" , ) -> List[str]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(_lowerCamelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_lowerCamelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) _lowerCamelCase : str = [] for i in range(_lowerCamelCase ): _lowerCamelCase : Any = i / num_diffusion_timesteps _lowerCamelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) ) return torch.tensor(_lowerCamelCase , dtype=torch.floataa ) class A_ ( _a , _a ): @register_to_config def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,): '''simple docstring''' if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) _lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = 1.0 - self.betas _lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 ) _lowerCamelCase : int = torch.tensor(1.0 ) # standard deviation of the initial noise distribution _lowerCamelCase : Tuple = 1.0 # setable values _lowerCamelCase : List[Any] = None _lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() ) _lowerCamelCase : List[str] = variance_type def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ): '''simple docstring''' return sample def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ): '''simple docstring''' _lowerCamelCase : str = num_inference_steps _lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) _lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) _lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ): '''simple docstring''' if prev_timestep is None: _lowerCamelCase : List[str] = t - 1 _lowerCamelCase : Optional[int] = self.alphas_cumprod[t] _lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : List[Any] = self.betas[t] else: _lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from 
https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample _lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: _lowerCamelCase : List[str] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": _lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) ) _lowerCamelCase : str = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler _lowerCamelCase : str = variance.log() _lowerCamelCase : str = beta.log() _lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2 _lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,): '''simple docstring''' _lowerCamelCase : str = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": _lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 ) else: _lowerCamelCase : List[Any] = None # 1. compute alphas, betas if prev_timestep is None: _lowerCamelCase : List[Any] = t - 1 _lowerCamelCase : Dict = self.alphas_cumprod[t] _lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one _lowerCamelCase : Dict = 1 - alpha_prod_t _lowerCamelCase : List[str] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: _lowerCamelCase : Any = self.betas[t] _lowerCamelCase : str = self.alphas[t] else: _lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev _lowerCamelCase : Optional[Any] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": _lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": _lowerCamelCase : List[Any] = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: _lowerCamelCase : Any = torch.clamp( __lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t _lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _lowerCamelCase : Union[str, Any] = 0 if t > 0: _lowerCamelCase : Dict = randn_tensor( model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device ) _lowerCamelCase : Any = self._get_variance( __lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,) if self.variance_type == "fixed_small_log": _lowerCamelCase : Optional[Any] = variance elif self.variance_type == "learned_range": _lowerCamelCase : Optional[int] = (0.5 * variance).exp() else: raise ValueError( F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" " for the UnCLIPScheduler." ) _lowerCamelCase : Dict = variance * variance_noise _lowerCamelCase : List[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.IntTensor ,): '''simple docstring''' _lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype ) _lowerCamelCase : Any = timesteps.to(original_samples.device ) _lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5 _lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 _lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): _lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) _lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
340
1
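A minimal sketch of how a scheduler like the one above is driven during sampling, assuming the public diffusers `UnCLIPScheduler` API (`set_timesteps`/`step`); the random `model_output` is a stand-in for a real denoising UNet and is not part of the original sample.

import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(variance_type="fixed_small_log")
scheduler.set_timesteps(25)

sample = torch.randn(1, 3, 64, 64)  # start from pure noise
for t in scheduler.timesteps:
    # a real pipeline would call a UNet here; we fake its epsilon prediction
    model_output = torch.randn_like(sample)
    # step() computes x_{t-1} from x_t exactly as in the code above
    sample = scheduler.step(model_output, t, sample).prev_sample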
"""simple docstring""" import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _lowerCAmelCase : Dict = get_tests_dir('''fixtures''') class A_ ( unittest.TestCase ): def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Any = mock.Mock() _lowerCamelCase : List[Any] = 500 _lowerCamelCase : Any = {} _lowerCamelCase : int = HTTPError _lowerCamelCase : List[str] = {} # Download this model to make sure it's in the cache. _lowerCamelCase : Optional[int] = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" ,return_value=__lowerCAmelCase ) as mock_head: _lowerCamelCase : Any = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # This check we did call the fake head request mock_head.assert_called() def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" ) @is_staging_test class A_ ( unittest.TestCase ): @classmethod def _lowercase ( cls: List[str] ): '''simple docstring''' _lowerCamelCase : Any = TOKEN HfFolder.save_token(__lowerCAmelCase ) @classmethod def _lowercase ( cls: Optional[int] ): '''simple docstring''' try: delete_repo(token=cls._token ,repo_id="test-feature-extractor" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="valid_org/test-feature-extractor-org" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="test-dynamic-feature-extractor" ) except HTTPError: pass def _lowercase ( self: int ): '''simple docstring''' _lowerCamelCase : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(__lowerCAmelCase ) feature_extractor.push_to_hub("test-feature-extractor" ,use_auth_token=self._token ) _lowerCamelCase : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(__lowerCAmelCase ,getattr(__lowerCAmelCase ,__lowerCAmelCase ) ) # Reset repo delete_repo(token=self._token ,repo_id="test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( __lowerCAmelCase ,repo_id="test-feature-extractor" ,push_to_hub=__lowerCAmelCase ,use_auth_token=self._token ) _lowerCamelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(__lowerCAmelCase ,getattr(__lowerCAmelCase ,__lowerCAmelCase ) ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(__lowerCAmelCase ) feature_extractor.push_to_hub("valid_org/test-feature-extractor" ,use_auth_token=self._token ) _lowerCamelCase : Tuple = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" ) for k, v in 
feature_extractor.__dict__.items(): self.assertEqual(__lowerCAmelCase ,getattr(__lowerCAmelCase ,__lowerCAmelCase ) ) # Reset repo delete_repo(token=self._token ,repo_id="valid_org/test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( __lowerCAmelCase ,repo_id="valid_org/test-feature-extractor-org" ,push_to_hub=__lowerCAmelCase ,use_auth_token=self._token ) _lowerCamelCase : Any = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(__lowerCAmelCase ,getattr(__lowerCAmelCase ,__lowerCAmelCase ) ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' CustomFeatureExtractor.register_for_auto_class() _lowerCamelCase : List[Any] = CustomFeatureExtractor.from_pretrained(__lowerCAmelCase ) feature_extractor.push_to_hub("test-dynamic-feature-extractor" ,use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map ,{"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} ,) _lowerCamelCase : Tuple = AutoFeatureExtractor.from_pretrained( F"""{USER}/test-dynamic-feature-extractor""" ,trust_remote_code=__lowerCAmelCase ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ ,"CustomFeatureExtractor" )
340
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Dict = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : str = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' for attribute in key.split("." ): _lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: _lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: _lowerCamelCase : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowerCamelCase : Tuple = value elif weight_type == "weight_g": _lowerCamelCase : List[str] = value elif weight_type == "weight_v": _lowerCamelCase : List[Any] = value elif weight_type == "bias": _lowerCamelCase : str = value elif weight_type == "running_mean": _lowerCamelCase : Optional[int] = value elif weight_type == "running_var": _lowerCamelCase : Optional[Any] = value elif weight_type == "num_batches_tracked": _lowerCamelCase : int = value elif weight_type == "inv_freq": _lowerCamelCase : List[str] = value else: _lowerCamelCase : Optional[Any] = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' _lowerCamelCase : Dict = [] _lowerCamelCase : Optional[Any] = fairseq_model.state_dict() _lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): _lowerCamelCase : Dict = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , ) _lowerCamelCase : List[Any] = True else: for key, mapped_key in MAPPING.items(): _lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _lowerCamelCase : int = True if "*" in mapped_key: _lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2] _lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase ) if "pos_bias_u" in name: _lowerCamelCase : int = None elif "pos_bias_v" in name: _lowerCamelCase : Any = None elif "weight_g" in name: _lowerCamelCase : Any = "weight_g" elif "weight_v" in name: _lowerCamelCase : Any = "weight_v" elif "bias" in name: _lowerCamelCase : Optional[Any] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj _lowerCamelCase : Dict = "weight" elif "running_mean" in name: _lowerCamelCase : str = "running_mean" elif "inv_freq" in name: _lowerCamelCase : List[Any] = "inv_freq" elif "running_var" in name: _lowerCamelCase : Tuple = "running_var" elif "num_batches_tracked" in name: _lowerCamelCase : str = "num_batches_tracked" else: _lowerCamelCase : Dict = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : int = full_name.split("conv_layers." )[-1] _lowerCamelCase : List[Any] = name.split("." 
) _lowerCamelCase : Union[str, Any] = int(items[0] ) _lowerCamelCase : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowerCamelCase : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowerCamelCase : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) _lowerCamelCase : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowerCamelCase : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCamelCase ) @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict: '''simple docstring''' if config_path is not None: _lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" ) else: _lowerCamelCase : Dict = WavaVecaConformerConfig() if "rope" in checkpoint_path: _lowerCamelCase : List[Any] = "rotary" if is_finetuned: if dict_path: _lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowerCamelCase : Optional[int] = target_dict.pad_index _lowerCamelCase : Dict = target_dict.bos_index _lowerCamelCase : Optional[Any] = target_dict.eos_index _lowerCamelCase : str = len(target_dict.symbols ) _lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" ) if not os.path.isdir(_lowerCamelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) _lowerCamelCase : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched _lowerCamelCase : List[str] = 0 _lowerCamelCase : List[Any] = 1 with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , ) _lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False _lowerCamelCase : 
Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) _lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) _lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase ) else: _lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase ) if is_finetuned: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" ) _lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase ) _lowerCamelCase : Dict = model[0].eval() recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned ) hf_wavavec.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) _lowerCAmelCase : str = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
340
1
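A rough usage sketch for a checkpoint produced by a conversion script like the one above (fine-tuned/CTC case). It uses the real transformers class names rather than the anonymized ones in the sample; the model path and the random audio are placeholders, not part of the original.

import torch
from transformers import Wav2Vec2ConformerForCTC, Wav2Vec2Processor

# placeholder for the --pytorch_dump_folder_path the script was run with
processor = Wav2Vec2Processor.from_pretrained("path/to/converted/model")
model = Wav2Vec2ConformerForCTC.from_pretrained("path/to/converted/model")

speech = torch.randn(16000)  # one second of fake 16 kHz audio
inputs = processor(speech.numpy(), sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits
transcription = processor.batch_decode(torch.argmax(logits, dim=-1))
print(transcription)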