Dataset schema (column: type, min – max):
  code                     string   87 – 55.2k characters
  code_codestyle           int64    0 – 349
  style_context            string   135 – 49.1k characters
  style_context_codestyle  int64    0 – 349
  label                    int64    0 – 1
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy (fractional) knapsack: maximize profit within max_weight kg."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # Profit gained per 1 kg for each item.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Copy of the list, with profit/weight sorted in ascending order.
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop until the total weight reaches the max limit (e.g. 15 kg) or i reaches length
    while limit <= max_weight and i < length:
        # pick the item with the greatest remaining profit/weight ratio
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # flag so the same item is not chosen twice

        # check if the whole item still fits in the remaining capacity
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Take the whole item: weight[index] / weight[index] == 1
            gain += 1 * profit[index]
        else:
            # The item does not fit entirely; take the fraction that fits.
            # (weight remaining) / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
94
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter


description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Write a basic cluster config for the local machine with sensible defaults."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
94
1
import os import re import shutil import sys import tempfile import unittest import black snake_case : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. snake_case : int = ''' def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states ''' class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[Any] = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) ) a :List[str] = self.transformer_dir shutil.copy( os.path.join(_lowerCamelCase , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , ) def SCREAMING_SNAKE_CASE__ ( self ): a :Tuple = '''src/transformers''' shutil.rmtree(self.transformer_dir ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ): a :Dict = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code if overwrite_result is not None: a :str = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result a :Dict = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) a :Any = black.format_str(_lowerCamelCase , mode=_lowerCamelCase ) a :List[str] = os.path.join(self.transformer_dir , '''new_code.py''' ) with open(_lowerCamelCase , '''w''' , newline='''\n''' ) as f: f.write(_lowerCamelCase ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(_lowerCamelCase ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=_lowerCamelCase ) with open(_lowerCamelCase , '''r''' ) as f: self.assertTrue(f.read() , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Any = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): # Base copy consistency self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , _lowerCamelCase , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , _lowerCamelCase ) , ) # Copy consistency with a really long name a :str = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F'''# Copied from 
transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub('''Bert''' , _lowerCamelCase , _lowerCamelCase ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , _lowerCamelCase , overwrite_result=re.sub('''Bert''' , '''TestModel''' , _lowerCamelCase ) , ) def SCREAMING_SNAKE_CASE__ ( self ): a :Any = check_copies.LOCALIZED_READMES['''README_zh-hans.md'''] a :List[str] = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),''' ''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**''' ''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders''' ''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang''' ''' Luong, Quoc V. Le, Christopher D. Manning.''' ) a :Union[str, Any] = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) a :List[str] = ( '''1. 
**[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文''' ''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自''' ''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather''' ''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,''' ''' Christopher D. Manning 发布。\n''' ) a , a :Optional[Any] = check_copies.convert_to_localized_md( _lowerCamelCase , _lowerCamelCase , localized_readme['''format_model_list'''] ) self.assertFalse(_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) a , a :Optional[int] = check_copies.convert_to_localized_md( _lowerCamelCase , _lowerCamelCase , localized_readme['''format_model_list'''] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(_lowerCamelCase ) a :str = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.''' ) a :Dict = ( '''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and''' ''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) a :str = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) a , a :int = check_copies.convert_to_localized_md( _lowerCamelCase , _lowerCamelCase , localized_readme['''format_model_list'''] ) # Check if the model link is synchronized. self.assertEqual(_lowerCamelCase , _lowerCamelCase )
94
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the digit string n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
94
1
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # Character-level tokenization: one token per character.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
94
import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any]="attention" ): """simple docstring""" a :Optional[int] = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel'''] a :Optional[Any] = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel'''] a :int = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel'''] a :Optional[Any] = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel'''] return k, o, q, v def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int=False ): """simple docstring""" if split_mlp_wi: a :int = params[F'''{prefix}/layers_{i}/mlp/wi_0/kernel'''] a :Optional[Any] = params[F'''{prefix}/layers_{i}/mlp/wi_1/kernel'''] a :Dict = (wi_a, wi_a) else: a :Optional[Any] = params[F'''{prefix}/layers_{i}/mlp/wi/kernel'''] a :Dict = params[F'''{prefix}/layers_{i}/mlp/wo/kernel'''] return wi, wo def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] ): """simple docstring""" return params[F'''{prefix}/layers_{i}/{layer_name}/scale'''] def __lowerCamelCase ( UpperCAmelCase_ : dict , *, UpperCAmelCase_ : int , UpperCAmelCase_ : bool ): """simple docstring""" a :str = traverse_util.flatten_dict(variables['''target'''] ) a :Any = {'''/'''.join(UpperCAmelCase_ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi a :Any = '''encoder/layers_0/mlp/wi_0/kernel''' in old print('''Split MLP:''' , UpperCAmelCase_ ) a :Optional[Any] = collections.OrderedDict() # Shared embeddings. a :Union[str, Any] = old['''token_embedder/embedding'''] # Encoder. for i in range(UpperCAmelCase_ ): # Block i, layer 0 (Self Attention). a :Optional[Any] = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , '''pre_attention_layer_norm''' ) a , a , a , a :Optional[int] = tax_attention_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , '''attention''' ) a :List[Any] = layer_norm a :str = k.T a :Dict = o.T a :int = q.T a :Optional[Any] = v.T # Block i, layer 1 (MLP). a :Tuple = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , '''pre_mlp_layer_norm''' ) a , a :List[Any] = tax_mlp_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , UpperCAmelCase_ ) a :Any = layer_norm if split_mlp_wi: a :Any = wi[0].T a :Tuple = wi[1].T else: a :List[str] = wi.T a :List[Any] = wo.T a :Union[str, Any] = old[ '''encoder/relpos_bias/rel_embedding''' ].T a :Optional[Any] = old['''encoder/encoder_norm/scale'''] if not is_encoder_only: # Decoder. for i in range(UpperCAmelCase_ ): # Block i, layer 0 (Self Attention). a :List[str] = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''pre_self_attention_layer_norm''' ) a , a , a , a :List[Any] = tax_attention_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''self_attention''' ) a :List[Any] = layer_norm a :Tuple = k.T a :int = o.T a :Any = q.T a :Optional[int] = v.T # Block i, layer 1 (Cross Attention). 
a :str = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''pre_cross_attention_layer_norm''' ) a , a , a , a :Any = tax_attention_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''encoder_decoder_attention''' ) a :str = layer_norm a :Optional[Any] = k.T a :Any = o.T a :Dict = q.T a :Optional[Any] = v.T # Block i, layer 2 (MLP). a :Optional[int] = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''pre_mlp_layer_norm''' ) a , a :List[Any] = tax_mlp_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , UpperCAmelCase_ ) a :Optional[int] = layer_norm if split_mlp_wi: a :int = wi[0].T a :Tuple = wi[1].T else: a :str = wi.T a :Dict = wo.T a :Any = old['''decoder/decoder_norm/scale'''] a :Optional[Any] = old[ '''decoder/relpos_bias/rel_embedding''' ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: a :Union[str, Any] = old['''decoder/logits_dense/kernel'''].T return new def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : bool ): """simple docstring""" a :List[Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: a :Optional[Any] = state_dict['''shared.weight'''] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: a :Tuple = state_dict['''shared.weight'''] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('''Using shared word embeddings as lm_head.''' ) a :Optional[Any] = state_dict['''shared.weight'''] return state_dict def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] ): """simple docstring""" a :Tuple = checkpoints.load_tax_checkpoint(UpperCAmelCase_ ) a :Optional[int] = convert_tax_to_pytorch(UpperCAmelCase_ , num_layers=config.num_layers , is_encoder_only=UpperCAmelCase_ ) a :Tuple = make_state_dict(UpperCAmelCase_ , UpperCAmelCase_ ) model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ ) def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ): """simple docstring""" a :List[Any] = TaConfig.from_json_file(UpperCAmelCase_ ) print(F'''Building PyTorch model from configuration: {config}''' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: a :Any = TaEncoderModel(UpperCAmelCase_ ) else: a :List[str] = TaForConditionalGeneration(UpperCAmelCase_ ) # Load weights from tf checkpoint load_tax_weights_in_ta(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(UpperCAmelCase_ ) # Verify that we can load the checkpoint. 
model.from_pretrained(UpperCAmelCase_ ) print('''Done''' ) if __name__ == "__main__": snake_case : Any = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) snake_case : Optional[Any] = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
94
1
from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """Given two of the three carrier concentrations (the missing one passed as 0),
    solve for the third using the mass-action law n * p = n_i**2."""
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
94
def solution(limit: int = 1_000_000) -> int:
    """Compute the sum of Euler's totient phi(n) for 2 <= n <= limit using a prime sieve."""
    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    # phi(n) = n * product over prime divisors p of (1 - 1/p)
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
94
1
from __future__ import annotations snake_case : List[Any] = tuple[int, int, int] snake_case : Union[str, Any] = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase snake_case : Optional[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ''' # -------------------------- default selection -------------------------- # rotors -------------------------- snake_case : Optional[Any] = '''EGZWVONAHDCLFQMSIPJBYUKXTR''' snake_case : str = '''FOBHMDKEXQNRAULPGSJVTYICZW''' snake_case : List[Any] = '''ZJXESIUQLHAVRMDOYGTNFWPBKC''' # reflector -------------------------- snake_case : Dict = { '''A''': '''N''', '''N''': '''A''', '''B''': '''O''', '''O''': '''B''', '''C''': '''P''', '''P''': '''C''', '''D''': '''Q''', '''Q''': '''D''', '''E''': '''R''', '''R''': '''E''', '''F''': '''S''', '''S''': '''F''', '''G''': '''T''', '''T''': '''G''', '''H''': '''U''', '''U''': '''H''', '''I''': '''V''', '''V''': '''I''', '''J''': '''W''', '''W''': '''J''', '''K''': '''X''', '''X''': '''K''', '''L''': '''Y''', '''Y''': '''L''', '''M''': '''Z''', '''Z''': '''M''', } # -------------------------- extra rotors -------------------------- snake_case : int = '''RMDJXFUWGISLHVTCQNKYPBEZOA''' snake_case : int = '''SGLCPQWZHKXAREONTFBVIYJUDM''' snake_case : List[str] = '''HVSICLTYKQUBXDWAJZOMFGPREN''' snake_case : Any = '''RZWQHFMVDBKICJLNTUXAGYPSOE''' snake_case : int = '''LFKIJODBEGAMQPXVUHYSTCZRWN''' snake_case : Dict = '''KOAEGVDHXPQZMLFTYWJNBRCIUS''' def __lowerCamelCase ( UpperCAmelCase_ : RotorPositionT , UpperCAmelCase_ : RotorSelectionT , UpperCAmelCase_ : str ): """simple docstring""" if (unique_rotsel := len(set(UpperCAmelCase_ ) )) < 3: a :str = F'''Please use 3 unique rotors (not {unique_rotsel})''' raise Exception(UpperCAmelCase_ ) # Checks if rotor positions are valid a , a , a :Optional[int] = rotpos if not 0 < rotorposa <= len(UpperCAmelCase_ ): a :int = F'''First rotor position is not within range of 1..26 ({rotorposa}''' raise ValueError(UpperCAmelCase_ ) if not 0 < rotorposa <= len(UpperCAmelCase_ ): a :int = F'''Second rotor position is not within range of 1..26 ({rotorposa})''' raise ValueError(UpperCAmelCase_ ) if not 0 < rotorposa <= len(UpperCAmelCase_ ): a :Optional[Any] = F'''Third rotor position is not within range of 1..26 ({rotorposa})''' raise ValueError(UpperCAmelCase_ ) # Validates string and returns dict a :Dict = _plugboard(UpperCAmelCase_ ) return rotpos, rotsel, pbdict def __lowerCamelCase ( UpperCAmelCase_ : str ): """simple docstring""" if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): a :str = F'''Plugboard setting isn\'t type string ({type(UpperCAmelCase_ )})''' raise TypeError(UpperCAmelCase_ ) elif len(UpperCAmelCase_ ) % 2 != 0: a :Any = F'''Odd number of symbols ({len(UpperCAmelCase_ )})''' raise Exception(UpperCAmelCase_ ) elif pbstring == "": return {} pbstring.replace(''' ''' , '''''' ) # Checks if all characters are unique a :Tuple = set() for i in pbstring: if i not in abc: a :Dict = F'''\'{i}\' not in list of symbols''' raise Exception(UpperCAmelCase_ ) elif i in tmppbl: a :List[Any] = F'''Duplicate symbol ({i})''' raise Exception(UpperCAmelCase_ ) else: tmppbl.add(UpperCAmelCase_ ) del tmppbl # Created the dictionary a :Optional[Any] = {} for j in range(0 , len(UpperCAmelCase_ ) - 1 , 2 ): a :Any = pbstring[j + 1] a :Optional[int] = pbstring[j] return pb def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : RotorPositionT , UpperCAmelCase_ : RotorSelectionT = (rotora, rotora, rotora) , UpperCAmelCase_ : str = "" , ): """simple 
docstring""" a :Tuple = text.upper() a , a , a :Union[str, Any] = _validator( UpperCAmelCase_ , UpperCAmelCase_ , plugb.upper() ) a , a , a :Optional[Any] = rotor_position a , a , a :List[str] = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 a :Union[str, Any] = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: a :List[str] = plugboard[symbol] # rotor ra -------------------------- a :Dict = abc.index(UpperCAmelCase_ ) + rotorposa a :List[str] = rotora[index % len(UpperCAmelCase_ )] # rotor rb -------------------------- a :List[str] = abc.index(UpperCAmelCase_ ) + rotorposa a :Tuple = rotora[index % len(UpperCAmelCase_ )] # rotor rc -------------------------- a :List[Any] = abc.index(UpperCAmelCase_ ) + rotorposa a :List[Any] = rotora[index % len(UpperCAmelCase_ )] # reflector -------------------------- # this is the reason you don't need another machine to decipher a :Optional[Any] = reflector[symbol] # 2nd rotors a :Dict = abc[rotora.index(UpperCAmelCase_ ) - rotorposa] a :Any = abc[rotora.index(UpperCAmelCase_ ) - rotorposa] a :Optional[int] = abc[rotora.index(UpperCAmelCase_ ) - rotorposa] # 2nd plugboard if symbol in plugboard: a :Optional[Any] = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(UpperCAmelCase_ ): a :str = 0 rotorposa += 1 if rotorposa >= len(UpperCAmelCase_ ): a :List[str] = 0 rotorposa += 1 if rotorposa >= len(UpperCAmelCase_ ): a :Any = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(UpperCAmelCase_ ) return "".join(UpperCAmelCase_ ) if __name__ == "__main__": snake_case : int = '''This is my Python script that emulates the Enigma machine from WWII.''' snake_case : Union[str, Any] = (1, 1, 1) snake_case : Union[str, Any] = '''pictures''' snake_case : Optional[int] = (rotora, rotora, rotora) snake_case : Dict = enigma(message, rotor_pos, rotor_sel, pb) print('''Encrypted message:''', en) print('''Decrypted message:''', enigma(en, rotor_pos, rotor_sel, pb))
94
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
94
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
94
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
94
1
import os
from typing import Dict, List, Tuple, TypeVar, Union


T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
94
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]=True ): """simple docstring""" model.train() a :str = model(UpperCAmelCase_ ) a :List[str] = F.mse_loss(UpperCAmelCase_ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(UpperCAmelCase_ ) def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int=False ): """simple docstring""" set_seed(42 ) a :List[Any] = RegressionModel() a :Any = deepcopy(UpperCAmelCase_ ) a :Tuple = RegressionDataset(length=80 ) a :Tuple = DataLoader(UpperCAmelCase_ , batch_size=16 ) model.to(accelerator.device ) if sched: a :str = AdamW(params=model.parameters() , lr=1E-3 ) a :str = AdamW(params=ddp_model.parameters() , lr=1E-3 ) a :List[str] = LambdaLR(UpperCAmelCase_ , lr_lambda=lambda UpperCAmelCase_ : epoch**0.65 ) a :List[str] = LambdaLR(UpperCAmelCase_ , lr_lambda=lambda UpperCAmelCase_ : epoch**0.65 ) # Make a copy of `model` if sched: a , a , a , a :List[Any] = accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: a , a :str = accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" a , a , a :str = get_training_setup(UpperCAmelCase_ ) # Use a single batch a , a :Dict = next(iter(UpperCAmelCase_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model a , a :int = accelerator.gather((ddp_input, ddp_target) ) a , a :Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCAmelCase_ ): step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: # Sync grads step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads 
should always be in sync check_model_parameters(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) a :Union[str, Any] = ddp_input[torch.randperm(len(UpperCAmelCase_ ) )] def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" a , a , a :List[str] = get_training_setup(UpperCAmelCase_ ) # Use a single batch a , a :List[str] = next(iter(UpperCAmelCase_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model a , a :List[Any] = accelerator.gather((ddp_input, ddp_target) ) a , a :Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCAmelCase_ ): step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: # Sync grads step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) a :Any = ddp_input[torch.randperm(len(UpperCAmelCase_ ) )] def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : int=False ): """simple docstring""" a :Optional[int] = Accelerator( split_batches=UpperCAmelCase_ , dispatch_batches=UpperCAmelCase_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly a , a , a :List[str] = get_training_setup(UpperCAmelCase_ ) for iteration, batch in enumerate(UpperCAmelCase_ ): a , a :List[Any] = batch.values() # Gather the distributed inputs and targs for the base model a , a :List[str] = accelerator.gather((ddp_input, ddp_target) ) a , a :List[str] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(UpperCAmelCase_ ): step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCAmelCase_ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'''Gradients not in sync 
when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) a :List[str] = ddp_input[torch.randperm(len(UpperCAmelCase_ ) )] GradientState._reset_state() def __lowerCamelCase ( UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[int]=False ): """simple docstring""" a :Optional[Any] = Accelerator( split_batches=UpperCAmelCase_ , dispatch_batches=UpperCAmelCase_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly a , a , a , a , a , a , a :Optional[Any] = get_training_setup(UpperCAmelCase_ , UpperCAmelCase_ ) for iteration, batch in enumerate(UpperCAmelCase_ ): a , a :int = batch.values() # Gather the distributed inputs and targs for the base model a , a :List[str] = accelerator.gather((ddp_input, ddp_target) ) a , a :str = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCAmelCase_ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(UpperCAmelCase_ ): step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n''' a :Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCAmelCase_ )) if accelerator.num_processes > 1: check_model_parameters(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def __lowerCamelCase ( ): """simple docstring""" a :Optional[Any] = Accelerator() a :int = RegressionDataset(length=80 ) a :List[str] = DataLoader(UpperCAmelCase_ , batch_size=16 ) a :List[Any] = RegressionDataset(length=96 ) a :Any = DataLoader(UpperCAmelCase_ , batch_size=16 ) a , a :Optional[int] = accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(UpperCAmelCase_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCAmelCase_ ) if iteration < len(UpperCAmelCase_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(UpperCAmelCase_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCAmelCase_ ) if batch_num < len(UpperCAmelCase_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def __lowerCamelCase ( ): """simple docstring""" a :Optional[int] = Accelerator() a :Optional[int] = accelerator.state if 
state.local_process_index == 0: print('''**Test `accumulate` gradient accumulation with dataloader break**''' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('''**Test NOOP `no_sync` context manager**''' ) test_noop_sync(UpperCAmelCase_ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('''**Test Distributed `no_sync` context manager**''' ) test_distributed_sync(UpperCAmelCase_ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation, ''' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(UpperCAmelCase_ , UpperCAmelCase_ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(UpperCAmelCase_ , UpperCAmelCase_ ) def __lowerCamelCase ( UpperCAmelCase_ : Tuple ): """simple docstring""" main() if __name__ == "__main__": main()
94
1
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
94
94
1
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = (DDPMParallelScheduler,) def SCREAMING_SNAKE_CASE__ ( self , **_lowerCamelCase ): a :List[Any] = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**_lowerCamelCase ) return config def SCREAMING_SNAKE_CASE__ ( self ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): self.check_over_configs(thresholding=_lowerCamelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_lowerCamelCase , prediction_type=_lowerCamelCase , sample_max_value=_lowerCamelCase , ) def SCREAMING_SNAKE_CASE__ ( self ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): for t in [0, 500, 999]: self.check_over_forward(time_step=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Any = self.scheduler_classes[0] a :Optional[Any] = self.get_scheduler_config() a :Any = scheduler_class(**_lowerCamelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5 def SCREAMING_SNAKE_CASE__ ( self ): a :Tuple = self.scheduler_classes[0] a :Any = self.get_scheduler_config() a :Union[str, Any] = scheduler_class(**_lowerCamelCase ) a :Dict = len(_lowerCamelCase ) a :Tuple = self.dummy_model() a :Optional[Any] = self.dummy_sample_deter a :str = self.dummy_sample_deter + 0.1 a :Optional[Any] = self.dummy_sample_deter - 0.1 a :int = samplea.shape[0] a :List[str] = torch.stack([samplea, samplea, samplea] , dim=0 ) a :List[str] = torch.arange(_lowerCamelCase )[0:3, None].repeat(1 , _lowerCamelCase ) a :str = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) a :List[Any] = scheduler.batch_step_no_noise(_lowerCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) a :str = torch.sum(torch.abs(_lowerCamelCase ) ) a :str = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1153.1833 ) < 1e-2 assert abs(result_mean.item() - 0.5005 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = self.scheduler_classes[0] a :Any = self.get_scheduler_config() a :Dict = scheduler_class(**_lowerCamelCase ) a :Tuple = len(_lowerCamelCase ) a :Optional[int] = self.dummy_model() a :Optional[Any] = self.dummy_sample_deter a :Optional[int] = torch.manual_seed(0 ) for t in reversed(range(_lowerCamelCase ) ): # 1. 
predict noise residual a :Dict = model(_lowerCamelCase , _lowerCamelCase ) # 2. predict previous mean of sample x_t-1 a :int = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample a :Any = pred_prev_sample a :int = torch.sum(torch.abs(_lowerCamelCase ) ) a :Dict = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 258.9606 ) < 1e-2 assert abs(result_mean.item() - 0.3372 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self ): a :Union[str, Any] = self.scheduler_classes[0] a :List[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' ) a :Any = scheduler_class(**_lowerCamelCase ) a :Tuple = len(_lowerCamelCase ) a :str = self.dummy_model() a :List[str] = self.dummy_sample_deter a :Any = torch.manual_seed(0 ) for t in reversed(range(_lowerCamelCase ) ): # 1. predict noise residual a :Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase ) # 2. predict previous mean of sample x_t-1 a :str = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample a :List[str] = pred_prev_sample a :Optional[int] = torch.sum(torch.abs(_lowerCamelCase ) ) a :Any = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 202.0296 ) < 1e-2 assert abs(result_mean.item() - 0.2631 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self ): a :Union[str, Any] = self.scheduler_classes[0] a :str = self.get_scheduler_config() a :Tuple = scheduler_class(**_lowerCamelCase ) a :int = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_lowerCamelCase ) a :List[str] = scheduler.timesteps for i, timestep in enumerate(_lowerCamelCase ): if i == len(_lowerCamelCase ) - 1: a :str = -1 else: a :List[str] = timesteps[i + 1] a :Union[str, Any] = scheduler.previous_timestep(_lowerCamelCase ) a :List[str] = prev_t.item() self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[int] = self.scheduler_classes[0] a :List[Any] = self.get_scheduler_config() a :Dict = scheduler_class(**_lowerCamelCase ) a :int = [100, 87, 50, 51, 0] with self.assertRaises(_lowerCamelCase , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Tuple = self.scheduler_classes[0] a :Any = self.get_scheduler_config() a :str = scheduler_class(**_lowerCamelCase ) a :Tuple = [100, 87, 50, 1, 0] a :Optional[Any] = len(_lowerCamelCase ) with self.assertRaises(_lowerCamelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=_lowerCamelCase , timesteps=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[str] = self.scheduler_classes[0] a :Optional[Any] = self.get_scheduler_config() a :Dict = scheduler_class(**_lowerCamelCase ) a :int = [scheduler.config.num_train_timesteps] with self.assertRaises( _lowerCamelCase , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=_lowerCamelCase )
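# A minimal standalone sketch (not part of the test suite above; names are mine)
# of the "fixed_small" posterior variance that the _get_variance assertions probe:
#   beta_tilde_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t
import torch

betas = torch.linspace(0.0001, 0.02, 1000)  # the linear schedule from get_scheduler_config
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)


def posterior_variance(t: int) -> torch.Tensor:
    alphabar_t = alphas_cumprod[t]
    alphabar_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return (1 - alphabar_prev) / (1 - alphabar_t) * betas[t]


# posterior_variance(0) is exactly 0.0, matching the first assertion above.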
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging snake_case : Dict = logging.get_logger(__name__) snake_case : Tuple = '''▁''' snake_case : Any = {'''vocab_file''': '''sentencepiece.bpe.model'''} snake_case : Tuple = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } snake_case : int = { '''xlm-roberta-base''': 5_12, '''xlm-roberta-large''': 5_12, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_12, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_12, '''xlm-roberta-large-finetuned-conll03-english''': 5_12, '''xlm-roberta-large-finetuned-conll03-german''': 5_12, } class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ['input_ids', 'attention_mask'] def __init__( self , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase = None , **_lowerCamelCase , ): # Mask token behave like a normal word, i.e. include the space before it a :Optional[int] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token a :int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) a :Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCamelCase ) ) a :str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token a :Tuple = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab a :List[str] = 1 a :Dict = len(self.sp_model ) + self.fairseq_offset a :List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ): a :List[str] = self.__dict__.copy() a :Optional[int] = None a :int = self.sp_model.serialized_model_proto() return state def __setstate__( self , _lowerCamelCase ): a :Union[str, Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): a :Union[str, Any] = {} a :Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a :List[Any] = [self.cls_token_id] a :Dict = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ): a :int = [self.sep_token_id] a :int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def SCREAMING_SNAKE_CASE__ ( self ): return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def SCREAMING_SNAKE_CASE__ ( self ): a :Any = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] a :Optional[Any] = self.sp_model.PieceToId(_lowerCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :Tuple = ''''''.join(_lowerCamelCase ).replace(_lowerCamelCase , ''' ''' ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ): if not os.path.isdir(_lowerCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return a :int = os.path.join( _lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCamelCase , 
'''wb''' ) as fi: a :List[Any] = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (out_vocab_file,)
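# A small illustration (values taken from the alignment comment above) of the
# fairseq/SentencePiece id mapping this tokenizer implements: every SPM id is
# shifted by fairseq_offset = 1, except SPM id 0 (<unk>), which fairseq pins to 3.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1


def spm_id_to_fairseq_id(spm_id: int) -> int:
    # SPM id 0 is <unk>; everything else shifts up by the offset.
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]


# spm_id_to_fairseq_id(0) -> 3, spm_id_to_fairseq_id(5) -> 6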
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL snake_case : Any = logging.get_logger(__name__) def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple ): """simple docstring""" a :Any = b.T a :Tuple = np.sum(np.square(UpperCAmelCase_ ) , axis=1 ) a :Union[str, Any] = np.sum(np.square(UpperCAmelCase_ ) , axis=0 ) a :Tuple = np.matmul(UpperCAmelCase_ , UpperCAmelCase_ ) a :List[str] = aa[:, None] - 2 * ab + ba[None, :] return d def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int ): """simple docstring""" a :Union[str, Any] = x.reshape(-1 , 3 ) a :List[Any] = squared_euclidean_distance(UpperCAmelCase_ , UpperCAmelCase_ ) return np.argmin(UpperCAmelCase_ , axis=1 ) class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = ['pixel_values'] def __init__( self , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = True , _lowerCamelCase = True , **_lowerCamelCase , ): super().__init__(**_lowerCamelCase ) a :int = size if size is not None else {'''height''': 256, '''width''': 256} a :Any = get_size_dict(_lowerCamelCase ) a :List[Any] = np.array(_lowerCamelCase ) if clusters is not None else None a :Optional[Any] = do_resize a :Any = size a :Dict = resample a :Optional[int] = do_normalize a :Optional[int] = do_color_quantize def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = None , **_lowerCamelCase , ): a :Optional[Any] = get_size_dict(_lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' ) return resize( _lowerCamelCase , size=(size['''height'''], size['''width''']) , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , ): a :int = rescale(image=_lowerCamelCase , scale=1 / 127.5 , data_format=_lowerCamelCase ) a :List[Any] = image - 1 return image def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ): a :Optional[int] = do_resize if do_resize is not None else self.do_resize a :Optional[Any] = size if size is not None else self.size a :Optional[int] = get_size_dict(_lowerCamelCase ) a :Union[str, Any] = resample if resample is not None else self.resample a :int = do_normalize if do_normalize is not None else self.do_normalize a :Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize a :List[str] = clusters if clusters is not None else self.clusters a :List[str] = np.array(_lowerCamelCase ) a :List[Any] = make_list_of_images(_lowerCamelCase ) if not valid_images(_lowerCamelCase ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_color_quantize and clusters is None: raise ValueError('''Clusters must be specified if do_color_quantize is True.''' ) # All transformations expect numpy arrays. a :Tuple = [to_numpy_array(_lowerCamelCase ) for image in images] if do_resize: a :List[str] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images] if do_normalize: a :List[Any] = [self.normalize(image=_lowerCamelCase ) for image in images] if do_color_quantize: a :Union[str, Any] = [to_channel_dimension_format(_lowerCamelCase , ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) a :Union[str, Any] = np.array(_lowerCamelCase ) a :List[Any] = color_quantize(_lowerCamelCase , _lowerCamelCase ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) a :List[str] = images.shape[0] a :Optional[Any] = images.reshape(_lowerCamelCase , -1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. a :Optional[int] = list(_lowerCamelCase ) else: a :Tuple = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images] a :Any = {'''input_ids''': images} return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
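# A self-contained sketch (toy palette, my variable names) of the nearest-cluster
# color quantization that squared_euclidean_distance and color_quantize implement
# above: each pixel maps to the index of the closest palette color.
import numpy as np

palette = np.array([[0, 0, 0], [255, 255, 255], [255, 0, 0]], dtype=np.float32)
pixels = np.array([[10, 10, 10], [250, 250, 240]], dtype=np.float32)

d = ((pixels[:, None, :] - palette[None, :, :]) ** 2).sum(-1)  # squared distances
ids = d.argmin(axis=1)  # -> array([0, 1]): near-black then near-white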
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        digits = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            digits += 1
        if digits == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
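# A quick sanity check for the reconstruction above: F(12) = 144 is the first
# Fibonacci number with three digits, so solution(3) should be 12.
assert solution(3) == 12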
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Primality test using the 6k +/- 1 optimisation."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All other primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
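# A small spot check for the reconstruction above: the primes below 10 are
# 2, 3, 5 and 7, so solution(10) should be 17.
assert solution(10) == 17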
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class _snake_case : def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , _lowerCamelCase=1000 , ): a :str = parent a :str = batch_size a :List[Any] = seq_length a :Union[str, Any] = is_training a :str = use_input_mask a :Tuple = use_token_type_ids a :Optional[int] = use_labels a :Union[str, Any] = vocab_size a :Optional[Any] = hidden_size a :Any = num_hidden_layers a :Optional[int] = num_attention_heads a :Tuple = intermediate_size a :Dict = hidden_act a :str = hidden_dropout_prob a :List[Any] = attention_probs_dropout_prob a :List[Any] = max_position_embeddings a :List[str] = type_vocab_size a :List[Any] = type_sequence_label_size a :Union[str, Any] = initializer_range a :Optional[Any] = num_labels a :Optional[int] = num_choices a :Union[str, Any] = scope a :List[str] = range_bbox def SCREAMING_SNAKE_CASE__ ( self ): a :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment a :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a :List[Any] = bbox[i, j, 3] a :List[str] = bbox[i, j, 1] a :List[str] = t if bbox[i, j, 2] < bbox[i, j, 0]: a :Dict = bbox[i, j, 2] a :Dict = bbox[i, j, 0] a :Any = t a :Optional[Any] = tf.convert_to_tensor(_lowerCamelCase ) a :int = None if self.use_input_mask: a :List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) a :Optional[int] = None if self.use_token_type_ids: a :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a :List[Any] = None a :List[Any] = None a :List[Any] = None if self.use_labels: a :Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a :Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a :List[str] = ids_tensor([self.batch_size] , self.num_choices ) a :List[Any] = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , 
initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :Optional[int] = TFLayoutLMModel(config=_lowerCamelCase ) a :Dict = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) a :Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase , token_type_ids=_lowerCamelCase ) a :Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :List[str] = TFLayoutLMForMaskedLM(config=_lowerCamelCase ) a :int = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :Optional[int] = self.num_labels a :List[Any] = TFLayoutLMForSequenceClassification(config=_lowerCamelCase ) a :int = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :int = self.num_labels a :Optional[int] = TFLayoutLMForTokenClassification(config=_lowerCamelCase ) a :int = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :Optional[Any] = TFLayoutLMForQuestionAnswering(config=_lowerCamelCase ) a :Optional[int] = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[str] = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) :List[Any] = config_and_inputs a :Union[str, Any] = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class _snake_case ( _snake_case , _snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if 
is_tf_available() else () ) SCREAMING_SNAKE_CASE__ = ( { 'feature-extraction': TFLayoutLMModel, 'fill-mask': TFLayoutLMForMaskedLM, 'text-classification': TFLayoutLMForSequenceClassification, 'token-classification': TFLayoutLMForTokenClassification, 'zero-shot': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = 10 def SCREAMING_SNAKE_CASE__ ( self ): a :Dict = TFLayoutLMModelTester(self ) a :Dict = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self ): a :str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self ): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a :str = TFLayoutLMModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) @unittest.skip('''Onnx compliancy broke with TF 2.10''' ) def SCREAMING_SNAKE_CASE__ ( self ): pass def __lowerCamelCase ( ): """simple docstring""" a :Tuple = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231 a :Any = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 a :List[str] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 a :List[str] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. 
at the token level) a :Any = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class _snake_case ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' ) a , a , a , a , a :Optional[Any] = prepare_layoutlm_batch_inputs() # forward pass a :Tuple = model(input_ids=_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) # test the sequence output on [0, :3, :3] a :List[str] = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=1e-3 ) ) # test the pooled output on [1, :3] a :List[str] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowerCamelCase , atol=1e-3 ) ) @slow def SCREAMING_SNAKE_CASE__ ( self ): # initialize model with randomly initialized sequence classification head a :str = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2 ) a , a , a , a , a :List[str] = prepare_layoutlm_batch_inputs() # forward pass a :List[Any] = model( input_ids=_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar a :Union[str, Any] = outputs.loss a :Optional[Any] = (2,) self.assertEqual(loss.shape , _lowerCamelCase ) # test the shape of the logits a :Any = outputs.logits a :Tuple = (2, 2) self.assertEqual(logits.shape , _lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self ): # initialize model with randomly initialized token classification head a :Dict = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=13 ) a , a , a , a , a :Dict = prepare_layoutlm_batch_inputs() # forward pass a :List[Any] = model( input_ids=_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase ) # test the shape of the logits a :Optional[Any] = outputs.logits a :List[Any] = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , _lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self ): # initialize model with randomly initialized token classification head a :List[Any] = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' ) a , a , a , a , a :Any = prepare_layoutlm_batch_inputs() # forward pass a :str = model(input_ids=_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) # test the shape of the logits a :Optional[int] = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , _lowerCamelCase ) self.assertEqual(outputs.end_logits.shape , _lowerCamelCase )
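# A vectorised restatement (mine, not the tester's) of the bbox sanitation loop
# in prepare_config_and_inputs above: coordinates are swapped so that
# (x0, y0) <= (x1, y1) holds for every box.
import numpy as np

bbox = np.array([[[10, 40, 5, 20]]])  # one batch, one token; x0 > x1 and y0 > y1
x0, y0, x1, y1 = bbox[..., 0], bbox[..., 1], bbox[..., 2], bbox[..., 3]
fixed = np.stack(
    [np.minimum(x0, x1), np.minimum(y0, y1), np.maximum(x0, x1), np.maximum(y0, y1)],
    axis=-1,
)
# fixed -> [[[5, 20, 10, 40]]]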
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """The Liouville function: -1 if the number of prime factors (counted with
    multiplicity) is odd, +1 if it is even."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
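# Two worked values for the function above, assuming prime_factors returns
# factors with multiplicity:
#   liouville_lambda(10) -> 1   (10 = 2 * 5, an even count of factors)
#   liouville_lambda(12) -> -1  (12 = 2 * 2 * 3, an odd count of factors)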
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive form of the Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
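# A small companion helper (not in the original) showing the standard gcd/lcm
# identity lcm(a, b) * gcd(a, b) == a * b.
def lcm(a: int, b: int) -> int:
    """Least common multiple via the gcd identity."""
    return a * b // euclidean_gcd(a, b)


assert lcm(4, 6) == 12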
import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets snake_case : Tuple = '''\ @inproceedings{kakwani2020indicnlpsuite, title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, year={2020}, booktitle={Findings of EMNLP}, } ''' snake_case : int = '''\ IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. ''' snake_case : Tuple = ''' Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset. Args: predictions: list of predictions to score (as int64), except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32). references: list of ground truth labels corresponding to the predictions (as int64), except for \'cvit-mkb-clsr\' where each reference is a vector (of float32). Returns: depending on the IndicGLUE subset, one or several of: "accuracy": Accuracy "f1": F1 score "precision": Precision@10 Examples: >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"] >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\') >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'precision@10\': 1.0} ''' def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" return float((preds == labels).mean() ) def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ): """simple docstring""" a :Tuple = simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ ) a :Optional[int] = float(fa_score(y_true=UpperCAmelCase_ , y_pred=UpperCAmelCase_ ) ) return { "accuracy": acc, "f1": fa, } def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ): """simple docstring""" a :Tuple = np.array(UpperCAmelCase_ ) a :Dict = np.array(UpperCAmelCase_ ) a :List[str] = en_sentvecs.shape[0] # mean centering a :Union[str, Any] = en_sentvecs - np.mean(UpperCAmelCase_ , axis=0 ) a :int = in_sentvecs - np.mean(UpperCAmelCase_ , axis=0 ) a :Optional[int] = cdist(UpperCAmelCase_ , UpperCAmelCase_ , '''cosine''' ) a :str = np.array(range(UpperCAmelCase_ ) ) a :Any = sim.argsort(axis=1 )[:, :10] a :str = np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _snake_case ( datasets.Metric ): def SCREAMING_SNAKE_CASE__ ( self ): if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", 
"actsa-sc", "md", "wiki-ner", ]: raise KeyError( '''You should supply a configuration name selected in ''' '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ''' '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ''' '''"wiki-ner"]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int64''' ) if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''' ) ), '''references''': datasets.Value('''int64''' ) if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''' ) ), } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ): if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(_lowerCamelCase , _lowerCamelCase )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(_lowerCamelCase , _lowerCamelCase ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(_lowerCamelCase , _lowerCamelCase )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ''' '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ''' '''"wiki-ner"]''' )
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Decrypt a Caesar cipher by choosing the shift whose letter distribution
    has the lowest chi-squared statistic against English letter frequencies."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the English language (how often they show up)
        frequencies = {
            "a": 0.08497, "b": 0.01492, "c": 0.02202, "d": 0.04253,
            "e": 0.11162, "f": 0.02228, "g": 0.02015, "h": 0.06094,
            "i": 0.07546, "j": 0.00153, "k": 0.01292, "l": 0.04025,
            "m": 0.02406, "n": 0.06749, "o": 0.07507, "p": 0.01929,
            "q": 0.00095, "r": 0.07587, "s": 0.06327, "t": 0.09356,
            "u": 0.02758, "v": 0.00978, "w": 0.02560, "x": 0.00150,
            "y": 0.01994, "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected number of times the letter should appear
                    # based on the letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected number of times the letter should appear
                    # based on the letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest
    # chi squared statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
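# A usage sketch for the function above: "khoor zruog" is "hello world"
# Caesar-shifted by 3, and on English-looking text the smallest chi-squared
# statistic picks out that shift. No result is asserted here; very short
# ciphertexts can fool a frequency test.
shift, chi_squared, plaintext = decrypt_caesar_with_chi_squared("khoor zruog")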
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class _snake_case ( unittest.TestCase ): def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=True , _lowerCamelCase=1 / 255 , _lowerCamelCase=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p a :int = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333} a :Union[str, Any] = parent a :Dict = batch_size a :Any = num_channels a :Optional[int] = min_resolution a :Dict = max_resolution a :Tuple = do_resize a :Union[str, Any] = size a :Optional[int] = do_normalize a :Tuple = image_mean a :Tuple = image_std a :Any = do_rescale a :Optional[int] = rescale_factor a :Union[str, Any] = do_pad def SCREAMING_SNAKE_CASE__ ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase=False ): if not batched: a :Optional[Any] = image_inputs[0] if isinstance(_lowerCamelCase , Image.Image ): a , a :Tuple = image.size else: a , a :int = image.shape[1], image.shape[2] if w < h: a :int = int(self.size['''shortest_edge'''] * h / w ) a :List[Any] = self.size['''shortest_edge'''] elif w > h: a :Optional[Any] = self.size['''shortest_edge'''] a :Union[str, Any] = int(self.size['''shortest_edge'''] * w / h ) else: a :Dict = self.size['''shortest_edge'''] a :Any = self.size['''shortest_edge'''] else: a :Dict = [] for image in image_inputs: a , a :Tuple = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) a :Union[str, Any] = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[0] )[0] a :Tuple = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _snake_case ( _snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = ConditionalDetrImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ ( self ): a :Dict = ConditionalDetrImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase , '''image_mean''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''image_std''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) ) def SCREAMING_SNAKE_CASE__ ( self ): a :str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 
1333} ) self.assertEqual(image_processor.do_pad , _lowerCamelCase ) a :Dict = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowerCamelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): pass def SCREAMING_SNAKE_CASE__ ( self ): # Initialize image_processing a :Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , Image.Image ) # Test not batched input a :Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values a , a :Any = self.image_processor_tester.get_expected_values(_lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a , a :Optional[int] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase ) a :int = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE__ ( self ): # Initialize image_processing a :Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a :Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , np.ndarray ) # Test not batched input a :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values a , a :str = self.image_processor_tester.get_expected_values(_lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a :Dict = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values a , a :Dict = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE__ ( self ): # Initialize image_processing a :str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a :int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , torch.Tensor ) # Test not batched input a :Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values a , a :Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a :List[Any] = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values a , a :List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) 
@slow def SCREAMING_SNAKE_CASE__ ( self ): # prepare image and target a :Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: a :Any = json.loads(f.read() ) a :str = {'''image_id''': 3_9769, '''annotations''': target} # encode them a :int = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' ) a :Union[str, Any] = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , return_tensors='''pt''' ) # verify pixel values a :List[str] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , _lowerCamelCase ) a :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4 ) ) # verify area a :Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowerCamelCase ) ) # verify boxes a :Any = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowerCamelCase ) a :str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowerCamelCase , atol=1e-3 ) ) # verify image_id a :Dict = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowerCamelCase ) ) # verify is_crowd a :Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowerCamelCase ) ) # verify class_labels a :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowerCamelCase ) ) # verify orig_size a :Tuple = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowerCamelCase ) ) # verify size a :Any = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowerCamelCase ) ) @slow def SCREAMING_SNAKE_CASE__ ( self ): # prepare image, target and masks_path a :List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: a :List[Any] = json.loads(f.read() ) a :Dict = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target} a :Tuple = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them a :int = ConditionalDetrImageProcessor(format='''coco_panoptic''' ) a :Optional[Any] = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , masks_path=_lowerCamelCase , return_tensors='''pt''' ) # verify pixel values a :Optional[Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , _lowerCamelCase ) a :str = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4 ) ) # verify area a :Optional[int] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowerCamelCase ) ) # verify boxes a :int = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowerCamelCase ) a :Any = torch.tensor([0.2625, 
0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowerCamelCase , atol=1e-3 ) ) # verify image_id a :int = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowerCamelCase ) ) # verify is_crowd a :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowerCamelCase ) ) # verify class_labels a :List[str] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowerCamelCase ) ) # verify masks a :Optional[Any] = 82_2873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _lowerCamelCase ) # verify orig_size a :Optional[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowerCamelCase ) ) # verify size a :int = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowerCamelCase ) )
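# A standalone restatement (hypothetical helper name) of the shortest-edge
# resize rule that get_expected_values mirrors above: the shorter side is
# scaled to shortest_edge and the other side keeps the aspect ratio.
def shortest_edge_resize(h: int, w: int, shortest_edge: int = 18) -> tuple[int, int]:
    """Return (height, width) after scaling the shorter side to shortest_edge."""
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


assert shortest_edge_resize(36, 18) == (36, 18)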
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists.\n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
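# A round-trip sketch using the functions above (commented out because the
# module relies on relative imports): with public key (n, e) and private key
# (n, d), RSA satisfies pow(pow(m, e, n), d, n) == m for any message m < n.
# public_key, private_key = generate_key(1024)
# n, e = public_key
# _, d = private_key
# assert pow(pow(42, e, n), d, n) == 42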
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging snake_case : List[str] = logging.get_logger(__name__) snake_case : int = { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'blenderbot-small' SCREAMING_SNAKE_CASE__ = ['past_key_values'] SCREAMING_SNAKE_CASE__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , _lowerCamelCase=5_0265 , _lowerCamelCase=512 , _lowerCamelCase=8 , _lowerCamelCase=2048 , _lowerCamelCase=16 , _lowerCamelCase=8 , _lowerCamelCase=2048 , _lowerCamelCase=16 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="gelu" , _lowerCamelCase=512 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=1 , _lowerCamelCase=False , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=2 , **_lowerCamelCase , ): a :Dict = vocab_size a :Optional[Any] = max_position_embeddings a :str = d_model a :Any = encoder_ffn_dim a :Optional[int] = encoder_layers a :List[str] = encoder_attention_heads a :List[str] = decoder_ffn_dim a :Optional[int] = decoder_layers a :str = decoder_attention_heads a :List[str] = dropout a :Optional[int] = attention_dropout a :Dict = activation_dropout a :List[str] = activation_function a :List[Any] = init_std a :Optional[int] = encoder_layerdrop a :Tuple = decoder_layerdrop a :List[str] = use_cache a :int = encoder_layers a :Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , forced_eos_token_id=_lowerCamelCase , **_lowerCamelCase , ) class _snake_case ( _snake_case ): @property def SCREAMING_SNAKE_CASE__ ( self ): if self.task in ["default", "seq2seq-lm"]: a :Optional[Any] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: a :Union[str, Any] = {0: '''batch'''} a :Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: a :Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''} a :str = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(_lowerCamelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
a :Optional[int] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: a , a :str = self.num_layers for i in range(_lowerCamelCase ): a :List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} a :List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''} else: a :Optional[int] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def SCREAMING_SNAKE_CASE__ ( self ): if self.task in ["default", "seq2seq-lm"]: a :List[Any] = super().outputs else: a :Union[str, Any] = super(_lowerCamelCase , self ).outputs if self.use_past: a , a :int = self.num_layers for i in range(_lowerCamelCase ): a :int = {0: '''batch''', 2: '''past_sequence + sequence'''} a :Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): a :Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Generate decoder inputs a :Dict = seq_length if not self.use_past else 1 a :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) a :List[Any] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} a :List[str] = dict(**_lowerCamelCase , **_lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch a , a :Optional[Any] = common_inputs['''input_ids'''].shape a :Tuple = common_inputs['''decoder_input_ids'''].shape[1] a , a :List[Any] = self.num_attention_heads a :List[Any] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) a :int = decoder_seq_length + 3 a :Union[str, Any] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) a :Union[str, Any] = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase )] , dim=1 ) a :List[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered a , a :Optional[int] = self.num_layers a :str = min(_lowerCamelCase , _lowerCamelCase ) a :str = max(_lowerCamelCase , _lowerCamelCase ) - min_num_layers a :Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(_lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase ), ) ) # TODO: test this. 
a :int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(_lowerCamelCase , _lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): a :Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch a , a :Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values a :Optional[int] = seqlen + 2 a , a :Union[str, Any] = self.num_layers a , a :Optional[Any] = self.num_attention_heads a :str = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) a :Tuple = common_inputs['''attention_mask'''].dtype a :Any = torch.cat( [common_inputs['''attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase , dtype=_lowerCamelCase )] , dim=1 ) a :Any = [ (torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) for _ in range(_lowerCamelCase ) ] return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX a :Optional[Any] = compute_effective_axis_dimension( _lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX a :Optional[int] = tokenizer.num_special_tokens_to_add(_lowerCamelCase ) a :Tuple = compute_effective_axis_dimension( _lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence a :List[str] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size a :Dict = dict(tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase ) ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): if self.task in ["default", "seq2seq-lm"]: a :Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase ) elif self.task == "causal-lm": a :Dict = self._generate_dummy_inputs_for_causal_lm( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase ) else: a :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if self.task in ["default", "seq2seq-lm"]: a :Optional[int] = 
super()._flatten_past_key_values_(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else: a :Any = super(_lowerCamelCase , self )._flatten_past_key_values_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
94
1
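# The {axis_index: axis_name} dictionaries assembled in the `inputs`/`outputs`
# properties above are exactly the dynamic-axes spec an ONNX export consumes.
# A minimal sketch with torch.onnx.export; `model`, `input_ids` and
# `attention_mask` are placeholders here, not part of the original source:
import torch

dynamic_axes = {
    "input_ids": {0: "batch", 1: "encoder_sequence"},
    "attention_mask": {0: "batch", 1: "encoder_sequence"},
}
torch.onnx.export(
    model,                           # any torch.nn.Module taking these tensors
    (input_ids, attention_mask),
    "blenderbot_small.onnx",
    input_names=list(dynamic_axes),
    dynamic_axes=dynamic_axes,       # batch/sequence dims stay variable-length
)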
from jiwer import compute_measures

import datasets


_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.
Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
    (float): the word error rate
Examples:
    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
94
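# The incremental WER above is just (S + D + I) / N accumulated over pairs. A
# self-contained sketch computing the same quantity with a word-level
# Levenshtein distance, so no jiwer dependency is needed:
def word_error_rate(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # dp[i][j] = edit distance between ref[:i] and hyp[:j]
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1   # substitution
            dp[i][j] = min(
                dp[i - 1][j] + 1,       # deletion
                dp[i][j - 1] + 1,       # insertion
                dp[i - 1][j - 1] + cost,
            )
    return dp[len(ref)][len(hyp)] / len(ref)

assert word_error_rate("this is the reference", "this is the prediction") == 0.25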
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
94
1
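# Checks like require_version boil down to comparing an installed version
# against a specifier string. A minimal sketch of the same idea using the
# `packaging` library directly (importlib.metadata is stdlib from Python 3.8):
from importlib.metadata import version as installed_version

from packaging.requirements import Requirement


def check(req_string: str) -> None:
    req = Requirement(req_string)          # e.g. "tqdm>=4.27"
    got = installed_version(req.name)
    if got not in req.specifier:
        raise ImportError(f"{req.name}=={got} does not satisfy '{req_string}'")


check("tqdm>=4.27")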
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
94
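# A minimal usage sketch of the processor above. The checkpoint name is an
# assumption (any CLAP checkpoint on the Hub with both a tokenizer and a
# feature extractor should work), and `audio` is a placeholder waveform:
import numpy as np

from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.random.randn(48_000).astype(np.float32)  # 1 second of fake 48 kHz audio
inputs = processor(
    text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt"
)
print(inputs.keys())  # input_ids, attention_mask, input_features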
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return the word's letters in sorted order."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every anagram of the given word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
94
1
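# The signature trick in one line: two words are anagrams iff their sorted
# letters match. A quick self-contained demo on an inline word list:
import collections

words = ["listen", "silent", "enlist", "google", "banana"]
by_signature = collections.defaultdict(list)
for w in words:
    by_signature["".join(sorted(w))].append(w)

print([group for group in by_signature.values() if len(group) > 1])
# [['listen', 'silent', 'enlist']]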
import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin snake_case : Any = '''▁''' snake_case : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class _snake_case ( _snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = BigBirdTokenizer SCREAMING_SNAKE_CASE__ = BigBirdTokenizerFast SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = True def SCREAMING_SNAKE_CASE__ ( self ): super().setUp() a :Union[str, Any] = self.tokenizer_class(_lowerCamelCase , keep_accents=_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self ): a :Dict = '''<s>''' a :int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''[MASK]''' ) self.assertEqual(len(_lowerCamelCase ) , 1004 ) def SCREAMING_SNAKE_CASE__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def SCREAMING_SNAKE_CASE__ ( self ): if not self.test_rust_tokenizer: return a :Union[str, Any] = self.get_tokenizer() a :Dict = self.get_rust_tokenizer() a :Union[str, Any] = '''I was born in 92000, and this is falsé.''' a :List[str] = tokenizer.tokenize(_lowerCamelCase ) a :Union[str, Any] = rust_tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) a :List[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) a :str = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) a :str = self.get_rust_tokenizer() a :Optional[Any] = tokenizer.encode(_lowerCamelCase ) a :Dict = rust_tokenizer.encode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :str = BigBirdTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) a :str = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowerCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [285, 46, 10, 170, 382] , ) a :List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) a :List[Any] = tokenizer.convert_tokens_to_ids(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) a :Union[str, Any] = tokenizer.convert_ids_to_tokens(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + 
'''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def SCREAMING_SNAKE_CASE__ ( self ): return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) @slow def SCREAMING_SNAKE_CASE__ ( self ): a :int = '''Hello World!''' a :Optional[int] = [65, 1_8536, 2260, 101, 66] self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) ) @slow def SCREAMING_SNAKE_CASE__ ( self ): a :Union[str, Any] = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) # fmt: off a :str = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231 # fmt: on self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) ) @require_torch @slow def SCREAMING_SNAKE_CASE__ ( self ): import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence a :Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] a :Dict = ''' '''.join(_lowerCamelCase ) a :Tuple = self.big_tokenizer.encode_plus(_lowerCamelCase , return_tensors='''pt''' , return_token_type_ids=_lowerCamelCase ) a :int = self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_lowerCamelCase ) a :Union[str, Any] = BigBirdConfig(attention_type='''original_full''' ) a :List[str] = BigBirdModel(_lowerCamelCase ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**_lowerCamelCase ) model(**_lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self ): a :int = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) a :str = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids ) self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' ) @slow def SCREAMING_SNAKE_CASE__ ( self ): # fmt: off a :List[Any] = {'''input_ids''': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 
66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
94
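# The slow tests above exercise the public tokenizer API end to end; a minimal
# usage sketch outside the test harness (downloads the checkpoint from the Hub
# on first run):
from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
ids = tokenizer.encode("Hello World!")
print(ids)                    # [65, 18536, 2260, 101, 66] per the test above
print(tokenizer.decode(ids))  # round-trips the text with [CLS]/[SEP] markers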
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self):
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
94
1
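# A quick non-interactive check of the class above. The 2x2 key [[2, 5], [1, 6]]
# has determinant 7, which is coprime with 36, so it passes check_determinant():
import numpy

hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
ct = hc.encrypt("HELLOHOWAREYOU")
print(ct)
print(hc.decrypt(ct))  # recovers the processed (uppercased, padded) plaintext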
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf
originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model
behind the chatbot, the company pivoted to focus on being a platform for machine learning.

In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]

On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other
research groups to release an open large language model.[4] In 2022, the workshop concluded with the
announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
94
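# The same tool outside the test harness, reusing the TEXT passage above -- a
# minimal sketch (load_tool downloads the tool's model on first use):
from transformers import load_tool

qa_tool = load_tool("text-question-answering")
print(qa_tool(text=TEXT, question="When was Hugging Face founded?"))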
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search over a graph given as adjacency lists."""
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
94
1
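# The same traversal written recursively -- a minimal sketch; it visits nodes
# in a different order than the iterative version but reaches the same set:
def dfs_recursive(graph: dict, v: str, explored: set = None) -> set:
    if explored is None:
        explored = set()
    explored.add(v)
    for adj in graph[v]:
        if adj not in explored:
            dfs_recursive(graph, adj, explored)
    return explored

assert dfs_recursive(G, "A") == depth_first_search(G, "A")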
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
94
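# A minimal sketch of instantiating and round-tripping the config above (pure
# config-level code, no model weights involved):
config = MarkupLMConfig(max_depth=64, xpath_unit_hidden_size=32)
print(config.model_type)                      # markuplm
config.save_pretrained("markuplm-config")     # writes config.json
reloaded = MarkupLMConfig.from_pretrained("markuplm-config")
assert reloaded.max_depth == 64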
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
94
1
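# Floyd-Warshall as written above gives O(n^3) all-pairs distances only; to
# recover the shortest paths themselves, keep a "next hop" matrix alongside
# the distance matrix. A minimal sketch of that extension (names are ours,
# not from the original class):
import math

def floyd_warshall_with_paths(dist):
    n = len(dist)
    nxt = [[j if dist[i][j] != math.inf else None for j in range(n)] for i in range(n)]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
                    nxt[i][j] = nxt[i][k]  # first hop of the improved route
    return dist, nxt

def path(nxt, u, v):
    if nxt[u][v] is None:
        return []
    out = [u]
    while u != v:
        u = nxt[u][v]
        out.append(u)
    return out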
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
94
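# CPython has a native popcount since 3.10 (int.bit_count); bin(n).count("1")
# works on every version. A quick cross-check against the two implementations
# above:
for n in (0, 1, 25, 37, 58, 2**63 - 1):
    assert (
        get_set_bits_count_using_brian_kernighans_algorithm(n)
        == get_set_bits_count_using_modulo_operator(n)
        == bin(n).count("1")
    )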
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name snake_case : Union[str, Any] = ''' Examples: ```py >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") >>> pipe_prior.to("cuda") >>> prompt = "red cat, 4k photo" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> zero_image_emb = out.negative_image_embeds >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") >>> pipe.to("cuda") >>> image = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=50, ... ).images >>> image[0].save("cat.png") ``` ''' def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str]=8 ): """simple docstring""" a :List[str] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 a :int = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class _snake_case ( _snake_case ): def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ): super().__init__() self.register_modules( unet=_lowerCamelCase , scheduler=_lowerCamelCase , movq=_lowerCamelCase , ) a :Any = 2 ** (len(self.movq.config.block_out_channels ) - 1) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if latents is None: a :str = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase , device=_lowerCamelCase , dtype=_lowerCamelCase ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) a :Any = latents.to(_lowerCamelCase ) a :Dict = latents * scheduler.init_noise_sigma return latents def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) a :int = torch.device(F'''cuda:{gpu_id}''' ) a :int = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=0 ): if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) a :Any = torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=_lowerCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) a :Tuple = None for cpu_offloaded_model in [self.unet, self.movq]: a , a :List[str] = cpu_offload_with_hook(_lowerCamelCase , _lowerCamelCase , prev_module_hook=_lowerCamelCase ) # We'll offload the last model manually. 
a :str = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def SCREAMING_SNAKE_CASE__ ( self ): if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(_lowerCamelCase , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_lowerCamelCase ) def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 100 , _lowerCamelCase = 4.0 , _lowerCamelCase = 1 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , ): a :int = self._execution_device a :Optional[Any] = guidance_scale > 1.0 if isinstance(_lowerCamelCase , _lowerCamelCase ): a :Union[str, Any] = torch.cat(_lowerCamelCase , dim=0 ) a :Any = image_embeds.shape[0] * num_images_per_prompt if isinstance(_lowerCamelCase , _lowerCamelCase ): a :List[str] = torch.cat(_lowerCamelCase , dim=0 ) if do_classifier_free_guidance: a :Union[str, Any] = image_embeds.repeat_interleave(_lowerCamelCase , dim=0 ) a :Optional[int] = negative_image_embeds.repeat_interleave(_lowerCamelCase , dim=0 ) a :Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCamelCase ) self.scheduler.set_timesteps(_lowerCamelCase , device=_lowerCamelCase ) a :Optional[Any] = self.scheduler.timesteps a :List[str] = self.unet.config.in_channels a , a :str = downscale_height_and_width(_lowerCamelCase , _lowerCamelCase , self.movq_scale_factor ) # create initial latent a :int = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(_lowerCamelCase ) ): # expand the latents if we are doing classifier free guidance a :Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a :Union[str, Any] = {'''image_embeds''': image_embeds} a :Optional[Any] = self.unet( sample=_lowerCamelCase , timestep=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , added_cond_kwargs=_lowerCamelCase , return_dict=_lowerCamelCase , )[0] if do_classifier_free_guidance: a , a :Any = noise_pred.split(latents.shape[1] , dim=1 ) a , a :List[str] = noise_pred.chunk(2 ) a , a :int = variance_pred.chunk(2 ) a :List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) a :Optional[int] = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): a , a :Tuple = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 a :int = self.scheduler.step( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase , )[0] # post-processing a :int = self.movq.decode(_lowerCamelCase , force_not_quantize=_lowerCamelCase )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: a :str = image * 0.5 + 0.5 a :List[Any] = image.clamp(0 , 1 ) a :str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if 
output_type == "pil": a :str = self.numpy_to_pil(_lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_lowerCamelCase )
94
1
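# The height/width helper at the top of the pipeline above (called
# downscale_height_and_width in the upstream diffusers source) rounds the
# requested pixel size up to a multiple of scale_factor**2 and returns the
# corresponding latent resolution. A standalone re-statement of the same
# arithmetic, with worked examples for the default factor of 8:
def latent_size(pixels: int, scale_factor: int = 8) -> int:
    cells = pixels // scale_factor**2
    if pixels % scale_factor**2 != 0:
        cells += 1  # round up to the next latent cell
    return cells * scale_factor

assert (latent_size(768), latent_size(768)) == (96, 96)
assert (latent_size(700), latent_size(500)) == (88, 64)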
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : Any=0.999 , UpperCAmelCase_ : Dict="cosine" , ): """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(UpperCAmelCase_ : int ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(UpperCAmelCase_ : Optional[int] ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) a :Dict = [] for i in range(UpperCAmelCase_ ): a :Dict = i / num_diffusion_timesteps a :List[str] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(UpperCAmelCase_ ) / alpha_bar_fn(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) return torch.tensor(UpperCAmelCase_ , dtype=torch.floataa ) class _snake_case ( _snake_case , _snake_case ): SCREAMING_SNAKE_CASE__ = [e.name for e in KarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE__ = 2 @register_to_config def __init__( self , _lowerCamelCase = 1000 , _lowerCamelCase = 0.0_0085 , _lowerCamelCase = 0.012 , _lowerCamelCase = "linear" , _lowerCamelCase = None , _lowerCamelCase = "epsilon" , _lowerCamelCase = "linspace" , _lowerCamelCase = 0 , ): if trained_betas is not None: a :Dict = torch.tensor(_lowerCamelCase , dtype=torch.floataa ) elif beta_schedule == "linear": a :List[str] = torch.linspace(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. a :List[Any] = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCamelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule a :List[str] = betas_for_alpha_bar(_lowerCamelCase ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) a :str = 1.0 - self.betas a :Union[str, Any] = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase=None ): if schedule_timesteps is None: a :Union[str, Any] = self.timesteps a :Tuple = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: a :Any = 1 if len(_lowerCamelCase ) > 1 else 0 else: a :int = timestep.cpu().item() if torch.is_tensor(_lowerCamelCase ) else timestep a :List[Any] = self._index_counter[timestep_int] return indices[pos].item() @property def SCREAMING_SNAKE_CASE__ ( self ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , ): a :List[Any] = self.index_for_timestep(_lowerCamelCase ) if self.state_in_first_order: a :Optional[int] = self.sigmas[step_index] else: a :List[Any] = self.sigmas_interpol[step_index] a :Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5) return sample def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , ): a :Tuple = num_inference_steps a :Union[str, Any] = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": a :int = np.linspace(0 , num_train_timesteps - 1 , _lowerCamelCase , dtype=_lowerCamelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": a :Any = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 a :Optional[Any] = (np.arange(0 , _lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(_lowerCamelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": a :List[Any] = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 a :str = (np.arange(_lowerCamelCase , 0 , -step_ratio )).round().copy().astype(_lowerCamelCase ) timesteps -= 1 else: raise ValueError( F'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) a :str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) a :List[str] = torch.from_numpy(np.log(_lowerCamelCase ) ).to(_lowerCamelCase ) a :Optional[int] = np.interp(_lowerCamelCase , np.arange(0 , len(_lowerCamelCase ) ) , _lowerCamelCase ) a :Optional[int] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) a :Any = torch.from_numpy(_lowerCamelCase ).to(device=_lowerCamelCase ) # interpolate sigmas a :int = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp() a :Optional[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) a :Any = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(_lowerCamelCase ).startswith('''mps''' ): # mps does not support float64 a :Union[str, Any] = torch.from_numpy(_lowerCamelCase ).to(_lowerCamelCase , dtype=torch.floataa ) else: a :Union[str, Any] = torch.from_numpy(_lowerCamelCase ).to(_lowerCamelCase ) # interpolate timesteps a :Any = self.sigma_to_t(_lowerCamelCase ).to(_lowerCamelCase , dtype=timesteps.dtype ) a :Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten() a :int = torch.cat([timesteps[:1], interleaved_timesteps] ) a :Dict = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter a :str = defaultdict(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): # get log sigma a :Any = sigma.log() # get distribution a :Optional[Any] = log_sigma - self.log_sigmas[:, None] # get sigmas range a :int = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) a :int = low_idx + 1 a :Optional[Any] = self.log_sigmas[low_idx] a :str = self.log_sigmas[high_idx] # interpolate sigmas a :Any = (low - log_sigma) / (low - high) a :Tuple = w.clamp(0 , 1 ) # transform interpolation to time range a :Any = (1 - w) * low_idx + w * high_idx a :List[str] = t.view(sigma.shape ) return t @property def SCREAMING_SNAKE_CASE__ ( self ): return self.sample is None def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ): a :Union[str, Any] = self.index_for_timestep(_lowerCamelCase ) # advance index counter by 1 a :Tuple = timestep.cpu().item() if torch.is_tensor(_lowerCamelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: a :Tuple = self.sigmas[step_index] a :int = self.sigmas_interpol[step_index + 1] a :List[Any] = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method a :List[Any] = self.sigmas[step_index - 1] a :List[str] = self.sigmas_interpol[step_index] a :Tuple = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API a :int = 0 a :List[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": a :Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol a :Any = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": a :str = sigma_hat if self.state_in_first_order else sigma_interpol a :Any = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('''prediction_type not implemented yet: sample''' ) else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order a :int = (sample - pred_original_sample) / sigma_hat # 3. delta timestep a :Dict = sigma_interpol - sigma_hat # store for 2nd order step a :Dict = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order a :Any = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep a :List[str] = sigma_next - sigma_hat a :str = self.sample a :str = None a :List[str] = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples a :Any = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(_lowerCamelCase ): # mps does not support float64 a :str = self.timesteps.to(original_samples.device , dtype=torch.floataa ) a :str = timesteps.to(original_samples.device , dtype=torch.floataa ) else: a :Optional[Any] = self.timesteps.to(original_samples.device ) a :List[Any] = timesteps.to(original_samples.device ) a :Dict = [self.index_for_timestep(_lowerCamelCase , _lowerCamelCase ) for t in timesteps] a :int = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): a :List[Any] = sigma.unsqueeze(-1 ) a :List[str] = original_samples + noise * sigma return noisy_samples def __len__( self ): return self.config.num_train_timesteps
94
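# The scheduler above matches diffusers' KDPM2DiscreteScheduler (a 2nd-order
# Karras sampler). A minimal sketch of driving such a scheduler by hand,
# assuming that class; `unet` is a placeholder for any epsilon-predicting
# model and is not part of the original code:
import torch

from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler()
scheduler.set_timesteps(num_inference_steps=30)

sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = unet(model_input, t)  # placeholder model call
    sample = scheduler.step(noise_pred, t, sample).prev_sample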
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
94
1
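# A minimal usage sketch of the legacy filesystem above, assuming a public
# dataset repo that contains a README.md (network access and huggingface_hub
# required):
from huggingface_hub import HfApi

repo_info = HfApi().dataset_info("squad")
fs = HfFileSystem(repo_info=repo_info)
print(fs.ls(""))                    # top-level files in the repo
with fs.open("README.md") as f:     # streams the file over HTTP
    print(f.read(200))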
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name snake_case : Union[str, Any] = ''' Examples: ```py >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") >>> pipe_prior.to("cuda") >>> prompt = "red cat, 4k photo" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> zero_image_emb = out.negative_image_embeds >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") >>> pipe.to("cuda") >>> image = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=50, ... ).images >>> image[0].save("cat.png") ``` ''' def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str]=8 ): """simple docstring""" a :List[str] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 a :int = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class _snake_case ( _snake_case ): def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ): super().__init__() self.register_modules( unet=_lowerCamelCase , scheduler=_lowerCamelCase , movq=_lowerCamelCase , ) a :Any = 2 ** (len(self.movq.config.block_out_channels ) - 1) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if latents is None: a :str = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase , device=_lowerCamelCase , dtype=_lowerCamelCase ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) a :Any = latents.to(_lowerCamelCase ) a :Dict = latents * scheduler.init_noise_sigma return latents def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) a :int = torch.device(F'''cuda:{gpu_id}''' ) a :int = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=0 ): if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) a :Any = torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=_lowerCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) a :Tuple = None for cpu_offloaded_model in [self.unet, self.movq]: a , a :List[str] = cpu_offload_with_hook(_lowerCamelCase , _lowerCamelCase , prev_module_hook=_lowerCamelCase ) # We'll offload the last model manually. 
a :str = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def SCREAMING_SNAKE_CASE__ ( self ): if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(_lowerCamelCase , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_lowerCamelCase ) def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 100 , _lowerCamelCase = 4.0 , _lowerCamelCase = 1 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , ): a :int = self._execution_device a :Optional[Any] = guidance_scale > 1.0 if isinstance(_lowerCamelCase , _lowerCamelCase ): a :Union[str, Any] = torch.cat(_lowerCamelCase , dim=0 ) a :Any = image_embeds.shape[0] * num_images_per_prompt if isinstance(_lowerCamelCase , _lowerCamelCase ): a :List[str] = torch.cat(_lowerCamelCase , dim=0 ) if do_classifier_free_guidance: a :Union[str, Any] = image_embeds.repeat_interleave(_lowerCamelCase , dim=0 ) a :Optional[int] = negative_image_embeds.repeat_interleave(_lowerCamelCase , dim=0 ) a :Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCamelCase ) self.scheduler.set_timesteps(_lowerCamelCase , device=_lowerCamelCase ) a :Optional[Any] = self.scheduler.timesteps a :List[str] = self.unet.config.in_channels a , a :str = downscale_height_and_width(_lowerCamelCase , _lowerCamelCase , self.movq_scale_factor ) # create initial latent a :int = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(_lowerCamelCase ) ): # expand the latents if we are doing classifier free guidance a :Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a :Union[str, Any] = {'''image_embeds''': image_embeds} a :Optional[Any] = self.unet( sample=_lowerCamelCase , timestep=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , added_cond_kwargs=_lowerCamelCase , return_dict=_lowerCamelCase , )[0] if do_classifier_free_guidance: a , a :Any = noise_pred.split(latents.shape[1] , dim=1 ) a , a :List[str] = noise_pred.chunk(2 ) a , a :int = variance_pred.chunk(2 ) a :List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) a :Optional[int] = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): a , a :Tuple = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 a :int = self.scheduler.step( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase , )[0] # post-processing a :int = self.movq.decode(_lowerCamelCase , force_not_quantize=_lowerCamelCase )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: a :str = image * 0.5 + 0.5 a :List[Any] = image.clamp(0 , 1 ) a :str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if 
output_type == "pil": a :str = self.numpy_to_pil(_lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_lowerCamelCase )
94
1
import math


class Graph:
    def __init__(self, n=0):
        # A weighted directed graph with nodes 0, 1, ..., n - 1.
        self.n = n
        # dp[i][j] stores the minimum distance from i to j; math.inf means
        # "no path found yet", and each node is at distance 0 from itself.
        self.dp = [[math.inf for _ in range(n)] for _ in range(n)]
        for i in range(n):
            self.dp[i][i] = 0

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        # Classic O(n^3) relaxation: successively allow node k as an
        # intermediate hop on every i -> j path.
        for k in range(self.n):
            for i in range(self.n):
                for j in range(self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))  # 11, via 1 -> 3 -> 4
    print(graph.show_min(0, 3))  # 16, via 0 -> 2 -> 3
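A quick sanity check, assuming the `Graph` class above is in scope: after relaxation, the cheap two-hop path must beat the more expensive direct edge.

g = Graph(3)
g.add_edge(0, 1, 3)
g.add_edge(1, 2, 4)
g.add_edge(0, 2, 10)
g.floyd_warshall()
assert g.show_min(0, 2) == 7  # 0 -> 1 -> 2 beats the direct cost of 10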
94
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):  # every valid start of a 13-digit window
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
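The same sliding-window search can be cross-checked with `math.prod` (Python 3.8+); `N` and `solution` refer to the definitions above, and `solution_windowed` is just an illustrative name.

import math


def solution_windowed(n: str = N, window: int = 13) -> int:
    # Maximum digit product over every contiguous `window`-digit slice.
    return max(
        math.prod(int(c) for c in n[i : i + window])
        for i in range(len(n) - window + 1)
    )


assert solution_windowed() == solution()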
94
1
import warnings

from .generation import TFGenerationMixin


class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
94
import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any]="attention" ): """simple docstring""" a :Optional[int] = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel'''] a :Optional[Any] = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel'''] a :int = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel'''] a :Optional[Any] = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel'''] return k, o, q, v def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int=False ): """simple docstring""" if split_mlp_wi: a :int = params[F'''{prefix}/layers_{i}/mlp/wi_0/kernel'''] a :Optional[Any] = params[F'''{prefix}/layers_{i}/mlp/wi_1/kernel'''] a :Dict = (wi_a, wi_a) else: a :Optional[Any] = params[F'''{prefix}/layers_{i}/mlp/wi/kernel'''] a :Dict = params[F'''{prefix}/layers_{i}/mlp/wo/kernel'''] return wi, wo def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] ): """simple docstring""" return params[F'''{prefix}/layers_{i}/{layer_name}/scale'''] def __lowerCamelCase ( UpperCAmelCase_ : dict , *, UpperCAmelCase_ : int , UpperCAmelCase_ : bool ): """simple docstring""" a :str = traverse_util.flatten_dict(variables['''target'''] ) a :Any = {'''/'''.join(UpperCAmelCase_ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi a :Any = '''encoder/layers_0/mlp/wi_0/kernel''' in old print('''Split MLP:''' , UpperCAmelCase_ ) a :Optional[Any] = collections.OrderedDict() # Shared embeddings. a :Union[str, Any] = old['''token_embedder/embedding'''] # Encoder. for i in range(UpperCAmelCase_ ): # Block i, layer 0 (Self Attention). a :Optional[Any] = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , '''pre_attention_layer_norm''' ) a , a , a , a :Optional[int] = tax_attention_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , '''attention''' ) a :List[Any] = layer_norm a :str = k.T a :Dict = o.T a :int = q.T a :Optional[Any] = v.T # Block i, layer 1 (MLP). a :Tuple = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , '''pre_mlp_layer_norm''' ) a , a :List[Any] = tax_mlp_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , UpperCAmelCase_ ) a :Any = layer_norm if split_mlp_wi: a :Any = wi[0].T a :Tuple = wi[1].T else: a :List[str] = wi.T a :List[Any] = wo.T a :Union[str, Any] = old[ '''encoder/relpos_bias/rel_embedding''' ].T a :Optional[Any] = old['''encoder/encoder_norm/scale'''] if not is_encoder_only: # Decoder. for i in range(UpperCAmelCase_ ): # Block i, layer 0 (Self Attention). a :List[str] = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''pre_self_attention_layer_norm''' ) a , a , a , a :List[Any] = tax_attention_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''self_attention''' ) a :List[Any] = layer_norm a :Tuple = k.T a :int = o.T a :Any = q.T a :Optional[int] = v.T # Block i, layer 1 (Cross Attention). 
a :str = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''pre_cross_attention_layer_norm''' ) a , a , a , a :Any = tax_attention_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''encoder_decoder_attention''' ) a :str = layer_norm a :Optional[Any] = k.T a :Any = o.T a :Dict = q.T a :Optional[Any] = v.T # Block i, layer 2 (MLP). a :Optional[int] = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''pre_mlp_layer_norm''' ) a , a :List[Any] = tax_mlp_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , UpperCAmelCase_ ) a :Optional[int] = layer_norm if split_mlp_wi: a :int = wi[0].T a :Tuple = wi[1].T else: a :str = wi.T a :Dict = wo.T a :Any = old['''decoder/decoder_norm/scale'''] a :Optional[Any] = old[ '''decoder/relpos_bias/rel_embedding''' ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: a :Union[str, Any] = old['''decoder/logits_dense/kernel'''].T return new def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : bool ): """simple docstring""" a :List[Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: a :Optional[Any] = state_dict['''shared.weight'''] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: a :Tuple = state_dict['''shared.weight'''] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('''Using shared word embeddings as lm_head.''' ) a :Optional[Any] = state_dict['''shared.weight'''] return state_dict def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] ): """simple docstring""" a :Tuple = checkpoints.load_tax_checkpoint(UpperCAmelCase_ ) a :Optional[int] = convert_tax_to_pytorch(UpperCAmelCase_ , num_layers=config.num_layers , is_encoder_only=UpperCAmelCase_ ) a :Tuple = make_state_dict(UpperCAmelCase_ , UpperCAmelCase_ ) model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ ) def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ): """simple docstring""" a :List[Any] = TaConfig.from_json_file(UpperCAmelCase_ ) print(F'''Building PyTorch model from configuration: {config}''' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: a :Any = TaEncoderModel(UpperCAmelCase_ ) else: a :List[str] = TaForConditionalGeneration(UpperCAmelCase_ ) # Load weights from tf checkpoint load_tax_weights_in_ta(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(UpperCAmelCase_ ) # Verify that we can load the checkpoint. 
model.from_pretrained(UpperCAmelCase_ ) print('''Done''' ) if __name__ == "__main__": snake_case : Any = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) snake_case : Optional[Any] = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
94
1
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of explored vertices."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
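For contrast, a recursive sketch that explores the same vertex set (the visit order differs); `G` and `depth_first_search` refer to the definitions above, and `depth_first_search_recursive` is an illustrative addition.

def depth_first_search_recursive(graph, v, explored=None):
    # Recursive twin of the iterative version; same explored set for a
    # connected component, although the traversal order differs.
    if explored is None:
        explored = set()
    explored.add(v)
    for adj in graph[v]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored


assert depth_first_search_recursive(G, "A") == depth_first_search(G, "A")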
94
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient over 2..limit, i.e. count the reduced proper
    fractions with denominator <= limit."""
    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # phi[n] starts at n and is scaled by (1 - 1/p) for each prime factor p.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
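Since the function sums Euler's totient over 2..limit, a direct count of reduced proper fractions should agree on small inputs; `brute_force` is an illustrative cross-check, not part of the original module.

from math import gcd


def brute_force(limit: int) -> int:
    # Count reduced proper fractions n/d with 0 < n < d <= limit directly.
    return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)


assert brute_force(8) == solution(8) == 21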
94
1
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found under `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
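A hypothetical usage sketch of the helpers above; the environment variable names are made up for illustration.

import os

os.environ["ACCELERATE_DEBUG"] = "1"
os.environ["WORLD_SIZE"] = "4"

print(parse_flag_from_env("ACCELERATE_DEBUG"))       # True
print(get_int_from_env(["NPROC", "WORLD_SIZE"], 1))  # 4 -- first key that is set wins
print(parse_choice_from_env("MIXED_PRECISION"))      # "no" -- unset, so the default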
94
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
94
1
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase (or PascalCase when `use_pascal`)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1  # PascalCase capitalizes the first word too
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
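A couple of illustrative calls, assuming the function above is in scope as `snake_to_camel_case`.

assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"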
94
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'ClapFeatureExtractor' SCREAMING_SNAKE_CASE__ = ('RobertaTokenizer', 'RobertaTokenizerFast') def __init__( self , _lowerCamelCase , _lowerCamelCase ): super().__init__(_lowerCamelCase , _lowerCamelCase ) def __call__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ): a :Dict = kwargs.pop('''sampling_rate''' , _lowerCamelCase ) if text is None and audios is None: raise ValueError('''You have to specify either text or audios. Both cannot be none.''' ) if text is not None: a :Optional[int] = self.tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase ) if audios is not None: a :Tuple = self.feature_extractor( _lowerCamelCase , sampling_rate=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase ) if text is not None and audios is not None: a :Union[str, Any] = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_lowerCamelCase ) , tensor_type=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ): return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ): return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase ) @property def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = self.tokenizer.model_input_names a :str = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
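A hedged usage sketch of the processor: the checkpoint id is the commonly published CLAP checkpoint and the 48 kHz rate matches its feature extractor, but substitute your own if they differ.

import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")  # assumed checkpoint

audio = np.random.randn(48_000)  # one second of placeholder audio at 48 kHz
inputs = processor(
    text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt"
)
print(inputs.keys())  # tokenizer fields plus `input_features` from the feature extractor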
94
1
import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow snake_case : Optional[Any] = logging.getLogger() @unittest.skip('Temporarily disable the doc tests.' ) @require_torch @require_tf @slow class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = True , ): a :Optional[int] = [file for file in os.listdir(_lowerCamelCase ) if os.path.isfile(os.path.join(_lowerCamelCase , _lowerCamelCase ) )] if identifier is not None: a :Optional[Any] = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(_lowerCamelCase , _lowerCamelCase ): for n_ in n_identifier: a :Optional[Any] = [file for file in files if n_ not in file] else: a :Optional[int] = [file for file in files if n_identifier not in file] a :List[Any] = ignore_files or [] ignore_files.append('''__init__.py''' ) a :Union[str, Any] = [file for file in files if file not in ignore_files] for file in files: # Open all files print('''Testing''' , _lowerCamelCase ) if only_modules: a :Dict = file.split('''.''' )[0] try: a :int = getattr(_lowerCamelCase , _lowerCamelCase ) a :List[Any] = doctest.DocTestSuite(_lowerCamelCase ) a :int = unittest.TextTestRunner().run(_lowerCamelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(F'''{module_identifier} is not a module.''' ) else: a :Any = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def SCREAMING_SNAKE_CASE__ ( self ): a :int = Path('''src/transformers''' ) a :Any = '''modeling''' a :Optional[Any] = [ '''modeling_ctrl.py''', '''modeling_tf_ctrl.py''', ] self.analyze_directory(_lowerCamelCase , identifier=_lowerCamelCase , ignore_files=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Union[str, Any] = Path('''src/transformers''' ) a :Any = '''tokenization''' self.analyze_directory(_lowerCamelCase , identifier=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Any = Path('''src/transformers''' ) a :Union[str, Any] = '''configuration''' self.analyze_directory(_lowerCamelCase , identifier=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Dict = Path('''src/transformers''' ) a :Any = ['''configuration''', '''modeling''', '''tokenization'''] self.analyze_directory(_lowerCamelCase , n_identifier=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[int] = Path('''docs/source''' ) a :int = ['''favicon.ico'''] self.analyze_directory(_lowerCamelCase , ignore_files=_lowerCamelCase , only_modules=_lowerCamelCase )
94
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]=True ): """simple docstring""" model.train() a :str = model(UpperCAmelCase_ ) a :List[str] = F.mse_loss(UpperCAmelCase_ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(UpperCAmelCase_ ) def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int=False ): """simple docstring""" set_seed(42 ) a :List[Any] = RegressionModel() a :Any = deepcopy(UpperCAmelCase_ ) a :Tuple = RegressionDataset(length=80 ) a :Tuple = DataLoader(UpperCAmelCase_ , batch_size=16 ) model.to(accelerator.device ) if sched: a :str = AdamW(params=model.parameters() , lr=1E-3 ) a :str = AdamW(params=ddp_model.parameters() , lr=1E-3 ) a :List[str] = LambdaLR(UpperCAmelCase_ , lr_lambda=lambda UpperCAmelCase_ : epoch**0.65 ) a :List[str] = LambdaLR(UpperCAmelCase_ , lr_lambda=lambda UpperCAmelCase_ : epoch**0.65 ) # Make a copy of `model` if sched: a , a , a , a :List[Any] = accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: a , a :str = accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" a , a , a :str = get_training_setup(UpperCAmelCase_ ) # Use a single batch a , a :Dict = next(iter(UpperCAmelCase_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model a , a :int = accelerator.gather((ddp_input, ddp_target) ) a , a :Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCAmelCase_ ): step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: # Sync grads step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads 
should always be in sync check_model_parameters(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) a :Union[str, Any] = ddp_input[torch.randperm(len(UpperCAmelCase_ ) )] def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" a , a , a :List[str] = get_training_setup(UpperCAmelCase_ ) # Use a single batch a , a :List[str] = next(iter(UpperCAmelCase_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model a , a :List[Any] = accelerator.gather((ddp_input, ddp_target) ) a , a :Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCAmelCase_ ): step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: # Sync grads step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) a :Any = ddp_input[torch.randperm(len(UpperCAmelCase_ ) )] def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : int=False ): """simple docstring""" a :Optional[int] = Accelerator( split_batches=UpperCAmelCase_ , dispatch_batches=UpperCAmelCase_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly a , a , a :List[str] = get_training_setup(UpperCAmelCase_ ) for iteration, batch in enumerate(UpperCAmelCase_ ): a , a :List[Any] = batch.values() # Gather the distributed inputs and targs for the base model a , a :List[str] = accelerator.gather((ddp_input, ddp_target) ) a , a :List[str] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(UpperCAmelCase_ ): step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCAmelCase_ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'''Gradients not in sync 
when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) a :List[str] = ddp_input[torch.randperm(len(UpperCAmelCase_ ) )] GradientState._reset_state() def __lowerCamelCase ( UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[int]=False ): """simple docstring""" a :Optional[Any] = Accelerator( split_batches=UpperCAmelCase_ , dispatch_batches=UpperCAmelCase_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly a , a , a , a , a , a , a :Optional[Any] = get_training_setup(UpperCAmelCase_ , UpperCAmelCase_ ) for iteration, batch in enumerate(UpperCAmelCase_ ): a , a :int = batch.values() # Gather the distributed inputs and targs for the base model a , a :List[str] = accelerator.gather((ddp_input, ddp_target) ) a , a :str = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCAmelCase_ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(UpperCAmelCase_ ): step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n''' a :Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCAmelCase_ )) if accelerator.num_processes > 1: check_model_parameters(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def __lowerCamelCase ( ): """simple docstring""" a :Optional[Any] = Accelerator() a :int = RegressionDataset(length=80 ) a :List[str] = DataLoader(UpperCAmelCase_ , batch_size=16 ) a :List[Any] = RegressionDataset(length=96 ) a :Any = DataLoader(UpperCAmelCase_ , batch_size=16 ) a , a :Optional[int] = accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(UpperCAmelCase_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCAmelCase_ ) if iteration < len(UpperCAmelCase_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(UpperCAmelCase_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCAmelCase_ ) if batch_num < len(UpperCAmelCase_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def __lowerCamelCase ( ): """simple docstring""" a :Optional[int] = Accelerator() a :Optional[int] = accelerator.state if 
state.local_process_index == 0: print('''**Test `accumulate` gradient accumulation with dataloader break**''' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('''**Test NOOP `no_sync` context manager**''' ) test_noop_sync(UpperCAmelCase_ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('''**Test Distributed `no_sync` context manager**''' ) test_distributed_sync(UpperCAmelCase_ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation, ''' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(UpperCAmelCase_ , UpperCAmelCase_ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(UpperCAmelCase_ , UpperCAmelCase_ ) def __lowerCamelCase ( UpperCAmelCase_ : Tuple ): """simple docstring""" main() if __name__ == "__main__": main()
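The core pattern these tests exercise, as a minimal sketch built from the same test utilities (a sketch, assuming `RegressionDataset` yields `{"x": ..., "y": ...}` batches): under `accelerator.accumulate(model)`, gradient sync is suppressed on the intermediate steps and only happens every `gradient_accumulation_steps` batches.

import torch.nn.functional as F
from torch.optim import AdamW
from torch.utils.data import DataLoader

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel

accelerator = Accelerator(gradient_accumulation_steps=2)
model = RegressionModel()
optimizer = AdamW(model.parameters(), lr=1e-3)
dataloader = DataLoader(RegressionDataset(length=80), batch_size=16)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch in dataloader:
    with accelerator.accumulate(model):  # no-op sync suppression on odd steps
        loss = F.mse_loss(model(batch["x"]), batch["y"])
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()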
94
1
from arguments import InitializationArguments from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser # Configuration snake_case : str = HfArgumentParser(InitializationArguments) snake_case : Tuple = parser.parse_args() # Load codeparrot tokenizer trained for Python code tokenization snake_case : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_name) # Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks snake_case : str = { '''vocab_size''': len(tokenizer), '''scale_attn_by_inverse_layer_idx''': True, '''reorder_and_upcast_attn''': True, } # Load model config (GPT-2 large in this case) snake_case : Tuple = AutoConfig.from_pretrained(args.config_name, **config_kwargs) # Initialize new model with config snake_case : Any = AutoModelForCausalLM.from_config(config) # Save model to the hub model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
94
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: maximise profit within `max_weight` kg."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # Profit gained per 1 kg for each item.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # A sorted copy, so items can be taken in decreasing profit/weight order.
    sorted_profit_by_weight = sorted(profit_by_weight)

    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # Loop until the capacity (e.g. 15 kg) is reached or all items are used.
    while limit <= max_weight and i < length:
        # Greatest remaining profit/weight ratio.
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark as used, so duplicates resolve correctly

        # Take the whole item if it fits, otherwise take a fraction of it.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the whole item: weight[index]/weight[index] == 1
            gain += 1 * profit[index]
        else:
            # The item is heavier than the remaining capacity, so take only
            # (remaining weight / item weight) of its profit and stop.
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
    # Function Call
    calc_profit(profit, weight, max_weight)
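A small worked example, assuming `calc_profit` above: the profit/weight ratios are 1/3, 1/2 and 3/5, so items are taken in the order 2, 1, 0, and all of them fit within the 15 kg limit.

assert calc_profit([1, 2, 3], [3, 4, 5], 15) == 6  # total weight 12 <= 15, full profit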
94
1
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __lowerCamelCase ( UpperCAmelCase_ : Dict ): """simple docstring""" a , a :Tuple = image.size a , a :Optional[Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 a :str = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) a :Union[str, Any] = np.array(UpperCAmelCase_ ).astype(np.floataa ) / 255.0 a :Tuple = image[None].transpose(0 , 3 , 1 , 2 ) a :str = torch.from_numpy(UpperCAmelCase_ ) return 2.0 * image - 1.0 class _snake_case ( _snake_case ): def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ): super().__init__() self.register_modules(vqvae=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase ) @torch.no_grad() def __call__( self , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 100 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , ): if isinstance(_lowerCamelCase , PIL.Image.Image ): a :List[str] = 1 elif isinstance(_lowerCamelCase , torch.Tensor ): a :int = image.shape[0] else: raise ValueError(F'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_lowerCamelCase )}''' ) if isinstance(_lowerCamelCase , PIL.Image.Image ): a :Dict = preprocess(_lowerCamelCase ) a , a :Optional[int] = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image a :Dict = (batch_size, self.unet.config.in_channels // 2, height, width) a :int = next(self.unet.parameters() ).dtype a :Tuple = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase , device=self.device , dtype=_lowerCamelCase ) a :Optional[Any] = image.to(device=self.device , dtype=_lowerCamelCase ) # set timesteps and move to the correct device self.scheduler.set_timesteps(_lowerCamelCase , device=self.device ) a :Optional[int] = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler a :Tuple = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] a :Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) a :Tuple = {} if accepts_eta: a :Optional[int] = eta for t in self.progress_bar(_lowerCamelCase ): # concat latents and low resolution image in the channel dimension. 
a :Tuple = torch.cat([latents, image] , dim=1 ) a :int = self.scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) # predict the noise residual a :Union[str, Any] = self.unet(_lowerCamelCase , _lowerCamelCase ).sample # compute the previous noisy sample x_t -> x_t-1 a :Optional[Any] = self.scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample # decode the image latents with the VQVAE a :Optional[int] = self.vqvae.decode(_lowerCamelCase ).sample a :Optional[Any] = torch.clamp(_lowerCamelCase , -1.0 , 1.0 ) a :List[Any] = image / 2 + 0.5 a :Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": a :int = self.numpy_to_pil(_lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_lowerCamelCase )
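A hedged usage sketch against the upstream equivalent of this pipeline (`LDMSuperResolutionPipeline` in diffusers); the model id is the commonly used 4x checkpoint and `input.png` is a placeholder path.

from PIL import Image

from diffusers import LDMSuperResolutionPipeline

pipe = LDMSuperResolutionPipeline.from_pretrained(
    "CompVis/ldm-super-resolution-4x-openimages"  # assumed checkpoint id
)
low_res = Image.open("input.png").convert("RGB").resize((128, 128))  # placeholder input
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")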
94
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging snake_case : Dict = logging.get_logger(__name__) snake_case : Tuple = '''▁''' snake_case : Any = {'''vocab_file''': '''sentencepiece.bpe.model'''} snake_case : Tuple = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } snake_case : int = { '''xlm-roberta-base''': 5_12, '''xlm-roberta-large''': 5_12, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_12, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_12, '''xlm-roberta-large-finetuned-conll03-english''': 5_12, '''xlm-roberta-large-finetuned-conll03-german''': 5_12, } class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ['input_ids', 'attention_mask'] def __init__( self , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase = None , **_lowerCamelCase , ): # Mask token behave like a normal word, i.e. include the space before it a :Optional[int] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token a :int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) a :Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCamelCase ) ) a :str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token a :Tuple = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab a :List[str] = 1 a :Dict = len(self.sp_model ) + self.fairseq_offset a :List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ): a :List[str] = self.__dict__.copy() a :Optional[int] = None a :int = self.sp_model.serialized_model_proto() return state def __setstate__( self , _lowerCamelCase ): a :Union[str, Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): a :Union[str, Any] = {} a :Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a :List[Any] = [self.cls_token_id] a :Dict = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ): a :int = [self.sep_token_id] a :int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def SCREAMING_SNAKE_CASE__ ( self ): return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def SCREAMING_SNAKE_CASE__ ( self ): a :Any = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] a :Optional[Any] = self.sp_model.PieceToId(_lowerCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :Tuple = ''''''.join(_lowerCamelCase ).replace(_lowerCamelCase , ''' ''' ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ): if not os.path.isdir(_lowerCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return a :int = os.path.join( _lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCamelCase , 
'''wb''' ) as fi: a :List[Any] = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (out_vocab_file,)
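The fairseq/SentencePiece alignment above reduces to a pinned control-token table plus a +1 offset on every real SentencePiece id; a standalone sketch where `spm_piece_to_id` stands in for the real SentencePiece model.

fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1


def token_to_id(token, spm_piece_to_id):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id.get(token, 0)
    # SentencePiece id 0 means "unknown", so fall back to the pinned <unk>.
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]


print(token_to_id(",", {",": 3}))    # 4, matching the fairseq vocab table above
print(token_to_id("xyz", {",": 3}))  # 3, i.e. <unk>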
94
1
import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin snake_case : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class _snake_case ( _snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = XLMProphetNetTokenizer SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = True def SCREAMING_SNAKE_CASE__ ( self ): super().setUp() # We have a SentencePiece fixture for testing a :int = XLMProphetNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[int] = '''[PAD]''' a :Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''[PAD]''' ) self.assertEqual(vocab_keys[1] , '''[CLS]''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(_lowerCamelCase ) , 1012 ) def SCREAMING_SNAKE_CASE__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1012 ) def SCREAMING_SNAKE_CASE__ ( self ): a :Tuple = XLMProphetNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) a :List[str] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowerCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a :Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) a :str = tokenizer.convert_tokens_to_ids(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) a :List[Any] = tokenizer.convert_ids_to_tokens(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''[UNK]''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''[UNK]''', '''.''', ] , ) @cached_property def SCREAMING_SNAKE_CASE__ ( self ): return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' ) @slow def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = '''Hello World!''' a :str = [3_5389, 6672, 49, 2] self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) ) @slow def SCREAMING_SNAKE_CASE__ ( self ): # fmt: off a :List[str] = 
{'''input_ids''': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
94
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with n digits
    (Project Euler problem 25)."""
    fa, fb = 1, 1
    index = 2
    while True:
        f = fa + fb
        fa, fb = fb, f
        index += 1
        # count the digits of the new Fibonacci number, not of the input n
        if len(str(f)) >= n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(input().strip())))
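# NOTE: quick sanity checks for solution() above; 4782 is the well-known answer
# to Project Euler problem 25 (first Fibonacci number with 1000 digits).
assert solution(3) == 12  # F(12) = 144 is the first 3-digit Fibonacci number
assert solution() == 4782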
94
1
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast snake_case : List[Any] = datasets.utils.logging.get_logger(__name__) @dataclass class _snake_case ( datasets.BuilderConfig ): SCREAMING_SNAKE_CASE__ = 1_0000 SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None class _snake_case ( datasets.ArrowBasedBuilder ): SCREAMING_SNAKE_CASE__ = ParquetConfig def SCREAMING_SNAKE_CASE__ ( self ): return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) a :Union[str, Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_lowerCamelCase , (str, list, tuple) ): a :Dict = data_files if isinstance(_lowerCamelCase , _lowerCamelCase ): a :List[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a :List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] a :List[str] = [] for split_name, files in data_files.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): a :Dict = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a :Any = [dl_manager.iter_files(_lowerCamelCase ) for file in files] # Infer features if they are stored in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(_lowerCamelCase ): with open(_lowerCamelCase , '''rb''' ) as f: a :int = datasets.Features.from_arrow_schema(pq.read_schema(_lowerCamelCase ) ) break splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) ) return splits def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example a :Dict = table_cast(_lowerCamelCase , self.info.features.arrow_schema ) return pa_table def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :str = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' ) for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ): with open(_lowerCamelCase , '''rb''' ) as f: a :str = pq.ParquetFile(_lowerCamelCase ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): a :int = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield F'''{file_idx}_{batch_idx}''', self._cast_table(_lowerCamelCase ) except ValueError as e: logger.error(F'''Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}''' ) raise
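# NOTE: illustrative only -- this builder is what `datasets` dispatches to when
# loading parquet files directly; the file path below is a placeholder.
import datasets

ds = datasets.load_dataset("parquet", data_files={"train": "path/to/data.parquet"})
print(ds["train"].features)  # inferred from the arrow schema when not given explicitly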
94
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class _snake_case : def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , _lowerCamelCase=1000 , ): a :str = parent a :str = batch_size a :List[Any] = seq_length a :Union[str, Any] = is_training a :str = use_input_mask a :Tuple = use_token_type_ids a :Optional[int] = use_labels a :Union[str, Any] = vocab_size a :Optional[Any] = hidden_size a :Any = num_hidden_layers a :Optional[int] = num_attention_heads a :Tuple = intermediate_size a :Dict = hidden_act a :str = hidden_dropout_prob a :List[Any] = attention_probs_dropout_prob a :List[Any] = max_position_embeddings a :List[str] = type_vocab_size a :List[Any] = type_sequence_label_size a :Union[str, Any] = initializer_range a :Optional[Any] = num_labels a :Optional[int] = num_choices a :Union[str, Any] = scope a :List[str] = range_bbox def SCREAMING_SNAKE_CASE__ ( self ): a :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment a :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a :List[Any] = bbox[i, j, 3] a :List[str] = bbox[i, j, 1] a :List[str] = t if bbox[i, j, 2] < bbox[i, j, 0]: a :Dict = bbox[i, j, 2] a :Dict = bbox[i, j, 0] a :Any = t a :Optional[Any] = tf.convert_to_tensor(_lowerCamelCase ) a :int = None if self.use_input_mask: a :List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) a :Optional[int] = None if self.use_token_type_ids: a :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a :List[Any] = None a :List[Any] = None a :List[Any] = None if self.use_labels: a :Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a :Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a :List[str] = ids_tensor([self.batch_size] , self.num_choices ) a :List[Any] = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , 
initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :Optional[int] = TFLayoutLMModel(config=_lowerCamelCase ) a :Dict = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) a :Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase , token_type_ids=_lowerCamelCase ) a :Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :List[str] = TFLayoutLMForMaskedLM(config=_lowerCamelCase ) a :int = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :Optional[int] = self.num_labels a :List[Any] = TFLayoutLMForSequenceClassification(config=_lowerCamelCase ) a :int = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :int = self.num_labels a :Optional[int] = TFLayoutLMForTokenClassification(config=_lowerCamelCase ) a :int = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :Optional[Any] = TFLayoutLMForQuestionAnswering(config=_lowerCamelCase ) a :Optional[int] = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[str] = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) :List[Any] = config_and_inputs a :Union[str, Any] = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class _snake_case ( _snake_case , _snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if 
is_tf_available() else () ) SCREAMING_SNAKE_CASE__ = ( { 'feature-extraction': TFLayoutLMModel, 'fill-mask': TFLayoutLMForMaskedLM, 'text-classification': TFLayoutLMForSequenceClassification, 'token-classification': TFLayoutLMForTokenClassification, 'zero-shot': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = 10 def SCREAMING_SNAKE_CASE__ ( self ): a :Dict = TFLayoutLMModelTester(self ) a :Dict = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self ): a :str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self ): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a :str = TFLayoutLMModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) @unittest.skip('''Onnx compliancy broke with TF 2.10''' ) def SCREAMING_SNAKE_CASE__ ( self ): pass def __lowerCamelCase ( ): """simple docstring""" a :Tuple = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231 a :Any = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 a :List[str] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 a :List[str] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. 
at the token level) a :Any = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class _snake_case ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' ) a , a , a , a , a :Optional[Any] = prepare_layoutlm_batch_inputs() # forward pass a :Tuple = model(input_ids=_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) # test the sequence output on [0, :3, :3] a :List[str] = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=1e-3 ) ) # test the pooled output on [1, :3] a :List[str] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowerCamelCase , atol=1e-3 ) ) @slow def SCREAMING_SNAKE_CASE__ ( self ): # initialize model with randomly initialized sequence classification head a :str = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2 ) a , a , a , a , a :List[str] = prepare_layoutlm_batch_inputs() # forward pass a :List[Any] = model( input_ids=_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar a :Union[str, Any] = outputs.loss a :Optional[Any] = (2,) self.assertEqual(loss.shape , _lowerCamelCase ) # test the shape of the logits a :Any = outputs.logits a :Tuple = (2, 2) self.assertEqual(logits.shape , _lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self ): # initialize model with randomly initialized token classification head a :Dict = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=13 ) a , a , a , a , a :Dict = prepare_layoutlm_batch_inputs() # forward pass a :List[Any] = model( input_ids=_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase ) # test the shape of the logits a :Optional[Any] = outputs.logits a :List[Any] = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , _lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self ): # initialize model with randomly initialized question answering head a :List[Any] = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' ) a , a , a , a , a :Any = prepare_layoutlm_batch_inputs() # forward pass a :str = model(input_ids=_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) # test the shape of the logits a :Optional[int] = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , _lowerCamelCase ) self.assertEqual(outputs.end_logits.shape , _lowerCamelCase )
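# NOTE: minimal sketch of how the batch helper above is consumed (it mirrors the
# first integration test); it needs TF plus network access for the checkpoint,
# and the printed shape assumes the base-sized model (hidden size 768).
model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
print(outputs.last_hidden_state.shape)  # (2, 25, 768)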
94
1
import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''', datefmt='''%Y-%m-%d %H:%M:%S''', level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(), stream=sys.stdout, ) snake_case : Any = logging.getLogger(__name__) snake_case : Optional[int] = {'''facebook/bart-base''': BartForConditionalGeneration} snake_case : List[str] = {'''facebook/bart-base''': BartTokenizer} def __lowerCamelCase ( ): """simple docstring""" a :List[Any] = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' ) parser.add_argument( '''--validation_file''' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='''A csv or a json file containing the validation data.''' ) parser.add_argument( '''--max_length''' , type=UpperCAmelCase_ , default=5 , help='''The maximum total input sequence length after tokenization.''' , ) parser.add_argument( '''--num_beams''' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help=( '''Number of beams to use for evaluation. This argument will be ''' '''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.''' ) , ) parser.add_argument( '''--model_name_or_path''' , type=UpperCAmelCase_ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=UpperCAmelCase_ , ) parser.add_argument( '''--config_name''' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='''Pretrained config name or path if not the same as model_name''' , ) parser.add_argument( '''--device''' , type=UpperCAmelCase_ , default='''cpu''' , help='''Device where the model will be run''' , ) parser.add_argument('''--output_file_path''' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='''Where to store the final ONNX file.''' ) a :Dict = parser.parse_args() return args def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any]="cpu" ): """simple docstring""" a :Optional[Any] = model_dict[model_name].from_pretrained(UpperCAmelCase_ ).to(UpperCAmelCase_ ) a :Tuple = tokenizer_dict[model_name].from_pretrained(UpperCAmelCase_ ) if model_name in ["facebook/bart-base"]: a :str = 0 a :Optional[Any] = None a :Dict = 0 return huggingface_model, tokenizer def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] ): """simple docstring""" model.eval() a :Tuple = None a :Dict = torch.jit.script(BARTBeamSearchGenerator(UpperCAmelCase_ ) ) with torch.no_grad(): a :List[Any] = '''My friends are cool but they eat too many carbs.''' a :List[str] = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors='''pt''' ).to(model.device ) a :Tuple = model.generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=UpperCAmelCase_ , max_length=UpperCAmelCase_ , early_stopping=UpperCAmelCase_ , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( UpperCAmelCase_ , ( inputs['''input_ids'''], inputs['''attention_mask'''], num_beams, max_length, model.config.decoder_start_token_id, ) , UpperCAmelCase_ , opset_version=14 , input_names=['''input_ids''', '''attention_mask''', 
'''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={ '''input_ids''': {0: '''batch''', 1: '''seq'''}, '''output_ids''': {0: '''batch''', 1: '''seq_out'''}, } , example_outputs=UpperCAmelCase_ , ) logger.info('''Model exported to {}'''.format(UpperCAmelCase_ ) ) a :Dict = remove_dup_initializers(os.path.abspath(UpperCAmelCase_ ) ) logger.info('''Deduplicated and optimized model written to {}'''.format(UpperCAmelCase_ ) ) a :List[str] = onnxruntime.InferenceSession(UpperCAmelCase_ ) a :List[str] = ort_sess.run( UpperCAmelCase_ , { '''input_ids''': inputs['''input_ids'''].cpu().numpy(), '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(), '''num_beams''': np.array(UpperCAmelCase_ ), '''max_length''': np.array(UpperCAmelCase_ ), '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 ) logger.info('''Model outputs from torch and ONNX Runtime are similar.''' ) logger.info('''Success.''' ) def __lowerCamelCase ( ): """simple docstring""" a :List[Any] = parse_args() a :str = 5 a :Optional[Any] = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() a :Any = torch.device(args.device ) a , a :str = load_model_tokenizer(args.model_name_or_path , UpperCAmelCase_ ) if model.config.decoder_start_token_id is None: raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' ) model.to(UpperCAmelCase_ ) if args.max_length: a :List[str] = args.max_length if args.num_beams: a :Any = args.num_beams if args.output_file_path: a :Any = args.output_file_path else: a :Dict = '''BART.onnx''' logger.info('''Exporting model to ONNX''' ) export_and_validate_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) if __name__ == "__main__": main()
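# NOTE: hedged example invocation for the export script above (the script file
# name and output path are placeholders); only facebook/bart-base is registered
# in the model/tokenizer dicts at the top of the file.
#
#   python export_bart_onnx.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 \
#       --max_length 5 \
#       --device cpu \
#       --output_file_path BART.onnx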
94
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm for the greatest common divisor."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
94
1
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'Speech2TextFeatureExtractor' SCREAMING_SNAKE_CASE__ = 'Speech2TextTokenizer' def __init__( self , _lowerCamelCase , _lowerCamelCase ): super().__init__(_lowerCamelCase , _lowerCamelCase ) a :int = self.feature_extractor a :Optional[Any] = False def __call__( self , *_lowerCamelCase , **_lowerCamelCase ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_lowerCamelCase , **_lowerCamelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) a :int = kwargs.pop('''raw_speech''' ) else: a :List[str] = kwargs.pop('''audio''' , _lowerCamelCase ) a :str = kwargs.pop('''sampling_rate''' , _lowerCamelCase ) a :str = kwargs.pop('''text''' , _lowerCamelCase ) if len(_lowerCamelCase ) > 0: a :str = args[0] a :Tuple = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: a :Dict = self.feature_extractor(_lowerCamelCase , *_lowerCamelCase , sampling_rate=_lowerCamelCase , **_lowerCamelCase ) if text is not None: a :Optional[int] = self.tokenizer(_lowerCamelCase , **_lowerCamelCase ) if text is None: return inputs elif audio is None: return encodings else: a :Optional[Any] = encodings['''input_ids'''] return inputs def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ): return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ): return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase ) @contextmanager def SCREAMING_SNAKE_CASE__ ( self ): warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) a :Union[str, Any] = True a :str = self.tokenizer yield a :List[str] = self.feature_extractor a :Any = False
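# NOTE: hedged usage sketch for the processor above; the checkpoint is the
# standard Speech2Text one and the audio array is a silent placeholder.
import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=audio, sampling_rate=16000, text="a transcription")
print(list(inputs.keys()))  # input features from the audio plus tokenized labels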
94
from __future__ import annotations def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : list[str] | None = None , UpperCAmelCase_ : dict[str, float] | None = None , UpperCAmelCase_ : bool = False , ): """simple docstring""" a :str = cipher_alphabet or [chr(UpperCAmelCase_ ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the English language (how much they show up) a :List[Any] = { '''a''': 0.08497, '''b''': 0.01492, '''c''': 0.02202, '''d''': 0.04253, '''e''': 0.11162, '''f''': 0.02228, '''g''': 0.02015, '''h''': 0.06094, '''i''': 0.07546, '''j''': 0.00153, '''k''': 0.01292, '''l''': 0.04025, '''m''': 0.02406, '''n''': 0.06749, '''o''': 0.07507, '''p''': 0.01929, '''q''': 0.00095, '''r''': 0.07587, '''s''': 0.06327, '''t''': 0.09356, '''u''': 0.02758, '''v''': 0.00978, '''w''': 0.02560, '''x''': 0.00150, '''y''': 0.01994, '''z''': 0.00077, } else: # Custom frequencies dictionary a :Dict = frequencies_dict if not case_sensitive: a :Union[str, Any] = ciphertext.lower() # Chi squared statistic values a :dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(UpperCAmelCase_ ) ): a :int = '''''' # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet a :Dict = (alphabet_letters.index(letter.lower() ) - shift) % len( UpperCAmelCase_ ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter a :List[Any] = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: a :Optional[int] = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message a :List[Any] = decrypted_with_shift.lower().count(UpperCAmelCase_ ) # Get the expected amount of times the letter should appear based # on letter frequencies a :Dict = frequencies[letter] * occurrences # Complete the chi squared statistic formula a :Any = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message a :int = decrypted_with_shift.count(UpperCAmelCase_ ) # Get the expected amount of times the letter should appear based # on letter frequencies a :Tuple = frequencies[letter] * occurrences # Complete the chi squared statistic formula a :Optional[Any] = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary a :Optional[Any] = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(UpperCAmelCase_ : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] a :int = min( UpperCAmelCase_ , key=UpperCAmelCase_ , ) # Get all the data from the most likely cipher (key, decoded message) ( ( a ) , ( a ) , ) :Optional[int] = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
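# NOTE: hedged usage sketch; upstream this Caesar-cipher breaker is named
# decrypt_caesar_with_chi_squared (mangled to __lowerCamelCase in this dump).
# Chi-squared scoring can misfire on very short ciphertexts.
shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared("khoor zruog")
print(shift, decoded)  # "hello world" shifted by 3, so a shift of 3 is expected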
94
1
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    """Simple nearest-neighbour image resizing."""

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # start from a white canvas of the destination size
        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output)
    waitKey(0)
    destroyAllWindows()
94
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """Return the Liouville lambda of ``number``: 1 if the number of prime
    factors (counted with multiplicity) is even, -1 if it is odd."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
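# NOTE: quick checks of liouville_lambda() above: 10 = 2 * 5 has an even number
# of prime factors, while 8 = 2**3 has an odd number.
assert liouville_lambda(10) == 1
assert liouville_lambda(8) == -1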
94
1
from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def __lowerCamelCase ( ): """simple docstring""" import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join a :Optional[int] = '''__test_patch_submodule_mock__''' with patch_submodule(_test_patching , '''os.path.join''' , UpperCAmelCase_ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everything is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def __lowerCamelCase ( ): """simple docstring""" assert _test_patching.open is open a :Dict = '''__test_patch_submodule_builtin_mock__''' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , '''open''' , UpperCAmelCase_ ): assert _test_patching.open is mock # check that everything is back to normal when the patch is over assert _test_patching.open is open def __lowerCamelCase ( ): """simple docstring""" a :Any = '''__test_patch_submodule_missing_mock__''' with patch_submodule(_test_patching , '''pandas.read_csv''' , UpperCAmelCase_ ): pass def __lowerCamelCase ( ): """simple docstring""" a :Any = '''__test_patch_submodule_missing_builtin_mock__''' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , '''len''' , UpperCAmelCase_ ) is None with patch_submodule(_test_patching , '''len''' , UpperCAmelCase_ ): assert _test_patching.len is mock assert _test_patching.len is len def __lowerCamelCase ( ): """simple docstring""" a :Tuple = '''__test_patch_submodule_start_and_stop_mock__''' a :Any = 
patch_submodule(_test_patching , '''open''' , UpperCAmelCase_ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def __lowerCamelCase ( ): """simple docstring""" from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join a :Dict = '''__test_patch_submodule_successive_join__''' a :List[str] = '''__test_patch_submodule_successive_dirname__''' a :Any = '''__test_patch_submodule_successive_rename__''' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , '''os.path.join''' , UpperCAmelCase_ ): with patch_submodule(_test_patching , '''os.rename''' , UpperCAmelCase_ ): with patch_submodule(_test_patching , '''os.path.dirname''' , UpperCAmelCase_ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , '''os.rename''' , UpperCAmelCase_ ): with patch_submodule(_test_patching , '''os.path.join''' , UpperCAmelCase_ ): with patch_submodule(_test_patching , '''os.path.dirname''' , UpperCAmelCase_ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def __lowerCamelCase ( ): """simple docstring""" a :Tuple = '''__test_patch_submodule_doesnt_exist_mock__''' with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , UpperCAmelCase_ ): pass with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , UpperCAmelCase_ ): pass
94
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging snake_case : List[str] = logging.get_logger(__name__) snake_case : int = { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'blenderbot-small' SCREAMING_SNAKE_CASE__ = ['past_key_values'] SCREAMING_SNAKE_CASE__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , _lowerCamelCase=5_0265 , _lowerCamelCase=512 , _lowerCamelCase=8 , _lowerCamelCase=2048 , _lowerCamelCase=16 , _lowerCamelCase=8 , _lowerCamelCase=2048 , _lowerCamelCase=16 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="gelu" , _lowerCamelCase=512 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=1 , _lowerCamelCase=False , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=2 , **_lowerCamelCase , ): a :Dict = vocab_size a :Optional[Any] = max_position_embeddings a :str = d_model a :Any = encoder_ffn_dim a :Optional[int] = encoder_layers a :List[str] = encoder_attention_heads a :List[str] = decoder_ffn_dim a :Optional[int] = decoder_layers a :str = decoder_attention_heads a :List[str] = dropout a :Optional[int] = attention_dropout a :Dict = activation_dropout a :List[str] = activation_function a :List[Any] = init_std a :Optional[int] = encoder_layerdrop a :Tuple = decoder_layerdrop a :List[str] = use_cache a :int = encoder_layers a :Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , forced_eos_token_id=_lowerCamelCase , **_lowerCamelCase , ) class _snake_case ( _snake_case ): @property def SCREAMING_SNAKE_CASE__ ( self ): if self.task in ["default", "seq2seq-lm"]: a :Optional[Any] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: a :Union[str, Any] = {0: '''batch'''} a :Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: a :Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''} a :str = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(_lowerCamelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
a :Optional[int] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: a , a :str = self.num_layers for i in range(_lowerCamelCase ): a :List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} a :List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''} else: a :Optional[int] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def SCREAMING_SNAKE_CASE__ ( self ): if self.task in ["default", "seq2seq-lm"]: a :List[Any] = super().outputs else: a :Union[str, Any] = super(_lowerCamelCase , self ).outputs if self.use_past: a , a :int = self.num_layers for i in range(_lowerCamelCase ): a :int = {0: '''batch''', 2: '''past_sequence + sequence'''} a :Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): a :Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Generate decoder inputs a :Dict = seq_length if not self.use_past else 1 a :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) a :List[Any] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} a :List[str] = dict(**_lowerCamelCase , **_lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch a , a :Optional[Any] = common_inputs['''input_ids'''].shape a :Tuple = common_inputs['''decoder_input_ids'''].shape[1] a , a :List[Any] = self.num_attention_heads a :List[Any] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) a :int = decoder_seq_length + 3 a :Union[str, Any] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) a :Union[str, Any] = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase )] , dim=1 ) a :List[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered a , a :Optional[int] = self.num_layers a :str = min(_lowerCamelCase , _lowerCamelCase ) a :str = max(_lowerCamelCase , _lowerCamelCase ) - min_num_layers a :Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(_lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase ), ) ) # TODO: test this. 
a :int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(_lowerCamelCase , _lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): a :Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch a , a :Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values a :Optional[int] = seqlen + 2 a , a :Union[str, Any] = self.num_layers a , a :Optional[Any] = self.num_attention_heads a :str = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) a :Tuple = common_inputs['''attention_mask'''].dtype a :Any = torch.cat( [common_inputs['''attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase , dtype=_lowerCamelCase )] , dim=1 ) a :Any = [ (torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) for _ in range(_lowerCamelCase ) ] return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX a :Optional[Any] = compute_effective_axis_dimension( _lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX a :Optional[int] = tokenizer.num_special_tokens_to_add(_lowerCamelCase ) a :Tuple = compute_effective_axis_dimension( _lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence a :List[str] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size a :Dict = dict(tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase ) ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): if self.task in ["default", "seq2seq-lm"]: a :Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase ) elif self.task == "causal-lm": a :Dict = self._generate_dummy_inputs_for_causal_lm( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase ) else: a :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if self.task in ["default", "seq2seq-lm"]: a :Optional[int] = 
super()._flatten_past_key_values_(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else: a :Any = super(_lowerCamelCase , self )._flatten_past_key_values_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
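# NOTE: hedged sketch of driving an OnnxSeq2SeqConfigWithPast subclass like the
# one above; upstream this class is BlenderbotSmallOnnxConfig, and the import
# locations below assume a transformers version where these names are public.
from transformers import AutoTokenizer, BlenderbotSmallConfig
from transformers.utils import TensorType

config = BlenderbotSmallConfig()
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
onnx_config = BlenderbotSmallOnnxConfig(config, task="seq2seq-lm")  # the class defined above, under its upstream name
dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
print(sorted(dummy_inputs.keys()))  # input_ids, attention_mask, decoder_* ...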
94
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case : Optional[int] = logging.get_logger(__name__) snake_case : Any = { '''microsoft/swin-tiny-patch4-window7-224''': ( '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json''' ), # See all Swin models at https://huggingface.co/models?filter=swin } class _snake_case ( _snake_case , _snake_case ): SCREAMING_SNAKE_CASE__ = 'swin' SCREAMING_SNAKE_CASE__ = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self , _lowerCamelCase=224 , _lowerCamelCase=4 , _lowerCamelCase=3 , _lowerCamelCase=96 , _lowerCamelCase=[2, 2, 6, 2] , _lowerCamelCase=[3, 6, 12, 24] , _lowerCamelCase=7 , _lowerCamelCase=4.0 , _lowerCamelCase=True , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=False , _lowerCamelCase=0.02 , _lowerCamelCase=1e-5 , _lowerCamelCase=32 , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase , ): super().__init__(**_lowerCamelCase ) a :Optional[Any] = image_size a :List[Any] = patch_size a :Any = num_channels a :str = embed_dim a :str = depths a :Optional[Any] = len(_lowerCamelCase ) a :int = num_heads a :str = window_size a :List[Any] = mlp_ratio a :Union[str, Any] = qkv_bias a :Optional[Any] = hidden_dropout_prob a :List[Any] = attention_probs_dropout_prob a :Any = drop_path_rate a :Dict = hidden_act a :Union[str, Any] = use_absolute_embeddings a :Tuple = layer_norm_eps a :Optional[Any] = initializer_range a :List[str] = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model a :List[str] = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) ) a :List[Any] = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(_lowerCamelCase ) + 1 )] a , a :Dict = get_aligned_output_features_output_indices( out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names ) class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE__ ( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self ): return 1e-4
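# NOTE: small sketch for the config above (publicly SwinConfig); it shows the
# backbone out_features/out_indices alignment performed in __init__.
from transformers import SwinConfig

cfg = SwinConfig(out_features=["stage1", "stage4"])
print(cfg.stage_names)                    # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(cfg.out_features, cfg.out_indices)  # ['stage1', 'stage4'] [1, 4]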
94
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers snake_case : Union[str, Any] = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('''dataclasses''') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('''importlib_metadata''') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=None ): """simple docstring""" require_version(deps[pkg] , UpperCAmelCase_ )
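# NOTE: hypothetical call of the helper defined above (upstream it is named
# dep_version_check); the package must be a key of the deps table, otherwise
# require_version raises.
dep_version_check("tokenizers")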
94
1
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string with Base85 (via base64.b85encode)."""
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Base85-encoded bytes back into a UTF-8 string."""
    return base64.b85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)

    decoded = base85_decode(encoded)
    print(decoded)
94
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return a sorted-letter signature; anagrams share the same signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every dictionary word with the same signature as my_word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}

    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
94
1
from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging snake_case : List[Any] = logging.get_logger(__name__) class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = ['input_values', 'padding_mask'] def __init__( self , _lowerCamelCase = 1 , _lowerCamelCase = 2_4000 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ): super().__init__(feature_size=_lowerCamelCase , sampling_rate=_lowerCamelCase , padding_value=_lowerCamelCase , **_lowerCamelCase ) a :Optional[int] = chunk_length_s a :Union[str, Any] = overlap @property def SCREAMING_SNAKE_CASE__ ( self ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def SCREAMING_SNAKE_CASE__ ( self ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) if padding and truncation: raise ValueError('''Both padding and truncation were set. 
Make sure you only set one.''' ) elif padding is None: # by default let's pad the inputs a :Optional[Any] = True a :Dict = bool( isinstance(_lowerCamelCase , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) ) if is_batched: a :Dict = [np.asarray(_lowerCamelCase , dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(_lowerCamelCase , np.ndarray ): a :Union[str, Any] = np.asarray(_lowerCamelCase , dtype=np.floataa ) elif isinstance(_lowerCamelCase , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): a :List[Any] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: a :Tuple = [np.asarray(_lowerCamelCase ).T] # verify inputs are valid for idx, example in enumerate(_lowerCamelCase ): if example.ndim > 2: raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' ) a :Tuple = None a :Dict = BatchFeature({'''input_values''': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: a :Dict = min(array.shape[0] for array in raw_audio ) a :List[Any] = int(np.floor(max_length / self.chunk_stride ) ) a :Optional[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: a :str = max(array.shape[0] for array in raw_audio ) a :Union[str, Any] = int(np.ceil(max_length / self.chunk_stride ) ) a :List[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length a :List[Any] = '''max_length''' else: a :str = input_values # normal padding on batch if padded_inputs is None: a :Any = self.pad( _lowerCamelCase , max_length=_lowerCamelCase , truncation=_lowerCamelCase , padding=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) if padding: a :Tuple = padded_inputs.pop('''attention_mask''' ) a :Union[str, Any] = [] for example in padded_inputs.pop('''input_values''' ): if self.feature_size == 1: a :Optional[int] = example[..., None] input_values.append(example.T ) a :int = input_values if return_tensors is not None: a :Any = padded_inputs.convert_to_tensors(_lowerCamelCase ) return padded_inputs
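# NOTE: small usage sketch for the feature extractor above; facebook/encodec_24khz
# is the standard EnCodec checkpoint, and the audio is a random placeholder.
import numpy as np
from transformers import EncodecFeatureExtractor

fe = EncodecFeatureExtractor.from_pretrained("facebook/encodec_24khz")
audio = np.random.randn(24000).astype(np.float32)  # one second of mono audio
batch = fe(raw_audio=audio, sampling_rate=24000, return_tensors="np")
print(batch["input_values"].shape)  # (batch, channels, samples) -> (1, 1, 24000)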
94
import string import numpy def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ): """simple docstring""" return b if a == 0 else greatest_common_divisor(b % a , UpperCAmelCase_ ) class _snake_case : SCREAMING_SNAKE_CASE__ = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) SCREAMING_SNAKE_CASE__ = numpy.vectorize(lambda _snake_case : x % 36 ) SCREAMING_SNAKE_CASE__ = numpy.vectorize(_snake_case ) def __init__( self , _lowerCamelCase ): a :List[Any] = self.modulus(_lowerCamelCase ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key a :int = encrypt_key.shape[0] def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): return self.key_string.index(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): return self.key_string[round(_lowerCamelCase )] def SCREAMING_SNAKE_CASE__ ( self ): a :str = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: a :Any = det % len(self.key_string ) a :Dict = len(self.key_string ) if greatest_common_divisor(_lowerCamelCase , len(self.key_string ) ) != 1: a :int = ( F'''determinant modular {req_l} of encryption key({det}) ''' F'''is not co prime w.r.t {req_l}.\nTry another key.''' ) raise ValueError(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :Optional[Any] = [char for char in text.upper() if char in self.key_string] a :List[str] = chars[-1] while len(_lowerCamelCase ) % self.break_key != 0: chars.append(_lowerCamelCase ) return "".join(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :Dict = self.process_text(text.upper() ) a :List[str] = '''''' for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ): a :int = text[i : i + self.break_key] a :Optional[int] = [self.replace_letters(_lowerCamelCase ) for char in batch] a :Union[str, Any] = numpy.array([vec] ).T a :str = self.modulus(self.encrypt_key.dot(_lowerCamelCase ) ).T.tolist()[ 0 ] a :List[Any] = ''''''.join( self.replace_digits(_lowerCamelCase ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: a :int = det % len(self.key_string ) a :Tuple = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: a :Tuple = i break a :List[str] = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(_lowerCamelCase ) ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :List[Any] = self.make_decrypt_key() a :str = self.process_text(text.upper() ) a :List[Any] = '''''' for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ): a :Optional[Any] = text[i : i + self.break_key] a :List[Any] = [self.replace_letters(_lowerCamelCase ) for char in batch] a :str = numpy.array([vec] ).T a :Dict = self.modulus(decrypt_key.dot(_lowerCamelCase ) ).T.tolist()[0] a :List[Any] = ''''''.join( self.replace_digits(_lowerCamelCase ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def __lowerCamelCase ( ): """simple docstring""" a :Tuple = int(input('''Enter the order of the encryption key: ''' ) ) a :Dict = [] print('''Enter each row of the encryption key with space separated integers''' ) for _ in range(UpperCAmelCase_ ): a :List[str] = [int(UpperCAmelCase_ ) for x in 
input().split()] hill_matrix.append(UpperCAmelCase_ ) a :Any = HillCipher(numpy.array(UpperCAmelCase_ ) ) print('''Would you like to encrypt or decrypt some text? (1 or 2)''' ) a :Any = input('''\n1. Encrypt\n2. Decrypt\n''' ) if option == "1": a :str = input('''What text would you like to encrypt?: ''' ) print('''Your encrypted text is:''' ) print(hc.encrypt(UpperCAmelCase_ ) ) elif option == "2": a :Dict = input('''What text would you like to decrypt?: ''' ) print('''Your decrypted text is:''' ) print(hc.decrypt(UpperCAmelCase_ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
94
1
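The Hill cipher above accepts a key only when its determinant, reduced modulo 36, is coprime with 36 (the alphabet size), since the key matrix must be invertible over Z_36. A minimal standalone sketch of that check, assuming the same 36-symbol alphabet; `gcd` and `key_is_valid` are illustrative names, not part of the original file:

```python
# Minimal sketch of the Hill cipher key-validity check above,
# assuming the same 36-symbol alphabet (A-Z plus digits).
import string

import numpy

KEY_STRING = string.ascii_uppercase + string.digits  # 36 symbols


def gcd(a: int, b: int) -> int:
    # Iterative greatest common divisor.
    while a:
        a, b = b % a, a
    return b


def key_is_valid(encrypt_key: numpy.ndarray) -> bool:
    # The key is usable only if det(K) mod 36 is coprime with 36,
    # i.e. the matrix is invertible over Z_36.
    det = round(numpy.linalg.det(encrypt_key)) % len(KEY_STRING)
    return gcd(det, len(KEY_STRING)) == 1


print(key_is_valid(numpy.array([[2, 5], [1, 6]])))  # det = 7 -> True
```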
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place, using binary search to find each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary search for the insertion index of val within collection[:i].
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements one slot to the right, then insert val.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
94
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of explored nodes."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
94
1
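The record above pairs binary insertion sort with an iterative depth-first search; both can be sanity-checked in a few lines, assuming the corrected definitions (`binary_insertion_sort`, `depth_first_search`) are in scope:

```python
# Quick sanity checks for the two helpers in the record above.
assert binary_insertion_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]
assert binary_insertion_sort([]) == []

tiny_graph = {"A": ["B", "C"], "B": ["A"], "C": ["A"]}
# Every node reachable from "A" is explored exactly once.
assert depth_first_search(tiny_graph, "A") == {"A", "B", "C"}
```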
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self ): # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. a :Tuple = [[1, 2, 4], [1, 2, 3, 4]] a :str = DisjunctiveConstraint(_lowerCamelCase ) self.assertTrue(isinstance(dc.token_ids , _lowerCamelCase ) ) with self.assertRaises(_lowerCamelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_lowerCamelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def SCREAMING_SNAKE_CASE__ ( self ): # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). a :Optional[int] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_lowerCamelCase ): DisjunctiveConstraint(_lowerCamelCase ) # fails here def SCREAMING_SNAKE_CASE__ ( self ): a :Tuple = [[1, 2, 3], [1, 2, 4]] a :Dict = DisjunctiveConstraint(_lowerCamelCase ) a , a , a :Union[str, Any] = dc.update(1 ) a :Optional[Any] = stepped is True and completed is False and reset is False self.assertTrue(_lowerCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) a , a , a :Optional[Any] = dc.update(2 ) a :Union[str, Any] = stepped is True and completed is False and reset is False self.assertTrue(_lowerCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) a , a , a :Union[str, Any] = dc.update(3 ) a :Optional[int] = stepped is True and completed is True and reset is False self.assertTrue(_lowerCamelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] a :List[str] = DisjunctiveConstraint(_lowerCamelCase ) a , a , a :Tuple = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) a , a , a :Dict = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) a , a , a :Dict = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) a , a , a :Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() a , a , a :List[str] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) a , a , a :Dict = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) a , a , a :List[Any] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
94
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
94
1
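`floyd_warshall` above relaxes every pair (i, j) through every intermediate node k, so `dp[i][j]` converges to the weight of the shortest path. A small check on a triangle, assuming the `Graph` class as used in its `__main__` block:

```python
# Tiny Floyd-Warshall check: the two-hop route 0 -> 1 -> 2 (cost 5)
# should beat the direct edge 0 -> 2 (cost 10).
g = Graph(3)
g.add_edge(0, 1, 4)
g.add_edge(1, 2, 1)
g.add_edge(0, 2, 10)
g.floyd_warshall()
assert g.show_min(0, 2) == 5
```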
from __future__ import annotations


def peak(lst: list[int]) -> int:
    """Return the peak of a list that increases to a single maximum, then decreases."""
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
94
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name snake_case : Union[str, Any] = ''' Examples: ```py >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") >>> pipe_prior.to("cuda") >>> prompt = "red cat, 4k photo" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> zero_image_emb = out.negative_image_embeds >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") >>> pipe.to("cuda") >>> image = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=50, ... ).images >>> image[0].save("cat.png") ``` ''' def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str]=8 ): """simple docstring""" a :List[str] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 a :int = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class _snake_case ( _snake_case ): def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ): super().__init__() self.register_modules( unet=_lowerCamelCase , scheduler=_lowerCamelCase , movq=_lowerCamelCase , ) a :Any = 2 ** (len(self.movq.config.block_out_channels ) - 1) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if latents is None: a :str = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase , device=_lowerCamelCase , dtype=_lowerCamelCase ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) a :Any = latents.to(_lowerCamelCase ) a :Dict = latents * scheduler.init_noise_sigma return latents def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) a :int = torch.device(F'''cuda:{gpu_id}''' ) a :int = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=0 ): if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) a :Any = torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=_lowerCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) a :Tuple = None for cpu_offloaded_model in [self.unet, self.movq]: a , a :List[str] = cpu_offload_with_hook(_lowerCamelCase , _lowerCamelCase , prev_module_hook=_lowerCamelCase ) # We'll offload the last model manually. 
a :str = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def SCREAMING_SNAKE_CASE__ ( self ): if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(_lowerCamelCase , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_lowerCamelCase ) def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 100 , _lowerCamelCase = 4.0 , _lowerCamelCase = 1 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , ): a :int = self._execution_device a :Optional[Any] = guidance_scale > 1.0 if isinstance(_lowerCamelCase , _lowerCamelCase ): a :Union[str, Any] = torch.cat(_lowerCamelCase , dim=0 ) a :Any = image_embeds.shape[0] * num_images_per_prompt if isinstance(_lowerCamelCase , _lowerCamelCase ): a :List[str] = torch.cat(_lowerCamelCase , dim=0 ) if do_classifier_free_guidance: a :Union[str, Any] = image_embeds.repeat_interleave(_lowerCamelCase , dim=0 ) a :Optional[int] = negative_image_embeds.repeat_interleave(_lowerCamelCase , dim=0 ) a :Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCamelCase ) self.scheduler.set_timesteps(_lowerCamelCase , device=_lowerCamelCase ) a :Optional[Any] = self.scheduler.timesteps a :List[str] = self.unet.config.in_channels a , a :str = downscale_height_and_width(_lowerCamelCase , _lowerCamelCase , self.movq_scale_factor ) # create initial latent a :int = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(_lowerCamelCase ) ): # expand the latents if we are doing classifier free guidance a :Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a :Union[str, Any] = {'''image_embeds''': image_embeds} a :Optional[Any] = self.unet( sample=_lowerCamelCase , timestep=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , added_cond_kwargs=_lowerCamelCase , return_dict=_lowerCamelCase , )[0] if do_classifier_free_guidance: a , a :Any = noise_pred.split(latents.shape[1] , dim=1 ) a , a :List[str] = noise_pred.chunk(2 ) a , a :int = variance_pred.chunk(2 ) a :List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) a :Optional[int] = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): a , a :Tuple = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 a :int = self.scheduler.step( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase , )[0] # post-processing a :int = self.movq.decode(_lowerCamelCase , force_not_quantize=_lowerCamelCase )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: a :str = image * 0.5 + 0.5 a :List[Any] = image.clamp(0 , 1 ) a :str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if 
output_type == "pil": a :str = self.numpy_to_pil(_lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_lowerCamelCase )
94
1
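`peak` above runs a divide-and-conquer search over a list that increases to a single maximum and then decreases, recursing toward the rising side. A few spot checks, assuming the corrected definition:

```python
# Spot checks for the divide-and-conquer peak finder above; inputs
# must strictly increase to one maximum and then strictly decrease.
assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5
assert peak([1, 10, 9, 8, 7, 6, 5, 4]) == 10
assert peak([1, 9, 8, 7]) == 9
```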
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() snake_case : Dict = logging.get_logger(__name__) snake_case : Dict = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } snake_case : Union[str, Any] = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def __lowerCamelCase ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any ): """simple docstring""" for attribute in key.split('''.''' ): a :List[str] = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) if weight_type is not None: a :Dict = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape else: a :Dict = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": a :Union[str, Any] = value elif weight_type == "weight_g": a :Dict = value elif weight_type == "weight_v": a :int = value elif weight_type == "bias": a :Optional[Any] = value else: a :Any = value logger.info(F'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" a :Dict = [] a :Optional[int] = fairseq_model.state_dict() a :Optional[Any] = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight a :Dict = None for name, value in fairseq_dict.items(): a :List[Any] = False if "conv_layers" in name: load_conv_layer( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , hf_model.config.feat_extract_norm == '''group''' , ) a :Optional[int] = True elif name.split('''.''' )[0] == "proj": a :Union[str, Any] = fairseq_model.proj a :Optional[Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: a :int = True if "*" in mapped_key: a :Any = name.split(UpperCAmelCase_ )[0].split('''.''' )[-2] a :int = mapped_key.replace('''*''' , UpperCAmelCase_ ) if "weight_g" in name: a :int = '''weight_g''' elif "weight_v" in name: a :Any = '''weight_v''' elif "bias" in name: a :Union[str, Any] = '''bias''' elif "weight" in name: a :Any = '''weight''' else: a :Any = None set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) continue if not is_used: unused_weights.append(UpperCAmelCase_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) return proj_weight def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int ): """simple docstring""" a :List[Any] = full_name.split('''conv_layers.''' )[-1] a :int = name.split('''.''' ) a :List[str] = int(items[0] ) a :Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) a :str = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) a :Any = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) a :Optional[int] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) a :Dict = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(UpperCAmelCase_ ) def __lowerCamelCase ( UpperCAmelCase_ : List[str] ): """simple docstring""" a , a :Any = emb.weight.shape a :Optional[int] = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ , bias=UpperCAmelCase_ ) a :Dict = emb.weight.data return lin_layer def __lowerCamelCase ( UpperCAmelCase_ : Dict ): """simple docstring""" with open(UpperCAmelCase_ , '''r''' , encoding='''utf-8''' ) as f: a :Tuple = f.readlines() a :List[str] = [line.split(''' ''' )[0] for line in lines] a :str = len(UpperCAmelCase_ ) a :Union[str, Any] = { '''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3, } vocab_dict.update(dict(zip(UpperCAmelCase_ , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , ): """simple docstring""" a :int = WavaVecaConfig.from_pretrained(UpperCAmelCase_ ) a :Union[str, Any] = SpeechaTextaConfig.from_pretrained( UpperCAmelCase_ , vocab_size=UpperCAmelCase_ , decoder_layers=UpperCAmelCase_ , do_stable_layer_norm=UpperCAmelCase_ ) a :str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , ) a , a , a :int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) a :Optional[int] = model[0].eval() # set weights for wav2vec2 encoder a :List[Any] = WavaVecaModel(UpperCAmelCase_ ) a :Optional[int] = recursively_load_weights_wavaveca(model.encoder , UpperCAmelCase_ ) a :Union[str, Any] = SpeechaTextaForCausalLM(UpperCAmelCase_ ) a , a :str = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCAmelCase_ ) # set output linear layer unexpected_keys.remove('''embed_out''' ) a :List[str] = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) a :Optional[int] = SpeechEncoderDecoderModel(encoder=UpperCAmelCase_ , decoder=UpperCAmelCase_ ) a :List[str] = False # add projection layer a :int = nn.Parameter(projection_layer.weight ) a :List[str] = nn.Parameter(projection_layer.bias ) a :Optional[Any] = create_vocab_dict(UpperCAmelCase_ ) with open(os.path.join(UpperCAmelCase_ , '''vocab.json''' ) , '''w''' ) as fp: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) a :Tuple = SpeechaTextaTokenizer(os.path.join(UpperCAmelCase_ , '''vocab.json''' ) ) tokenizer.save_pretrained(UpperCAmelCase_ ) a :Any = hf_wavavec.config.to_dict() a :Optional[int] = tokenizer.pad_token_id a :List[str] = tokenizer.bos_token_id a :List[str] = tokenizer.eos_token_id a :Dict = 
'''speech_to_text_2''' a :Union[str, Any] = '''wav2vec2''' a :List[str] = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase_ ) hf_wavavec.save_pretrained(UpperCAmelCase_ ) feature_extractor.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": snake_case : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-large-lv60''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/s2t-small-mustc-en-fr-st''', type=str, help='''Path to hf decoder s2t checkpoint config''', ) parser.add_argument('''--vocab_size''', default=1_02_24, type=int, help='''Vocab size of decoder''') parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''') snake_case : List[Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
94
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = '' SCREAMING_SNAKE_CASE__ = 'hf-legacy' # "hf://"" is reserved for hffs def __init__( self , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ): super().__init__(self , **_lowerCamelCase ) a :Union[str, Any] = repo_info a :int = token a :int = None def SCREAMING_SNAKE_CASE__ ( self ): if self.dir_cache is None: a :Dict = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes a :List[Any] = { '''name''': hf_file.rfilename, '''size''': None, '''type''': '''file''', } self.dir_cache.update( { str(_lowerCamelCase ): {'''name''': str(_lowerCamelCase ), '''size''': None, '''type''': '''directory'''} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = "rb" , **_lowerCamelCase , ): if not isinstance(self.repo_info , _lowerCamelCase ): raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' ) a :Optional[int] = hf_hub_url(self.repo_info.id , _lowerCamelCase , revision=self.repo_info.sha ) return fsspec.open( _lowerCamelCase , mode=_lowerCamelCase , headers=get_authentication_headers_for_url(_lowerCamelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open() def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , **_lowerCamelCase ): self._get_dirs() a :Union[str, Any] = self._strip_protocol(_lowerCamelCase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase=False , **_lowerCamelCase ): self._get_dirs() a :str = PurePosixPath(path.strip('''/''' ) ) a :Tuple = {} for p, f in self.dir_cache.items(): a :Optional[int] = PurePosixPath(p.strip('''/''' ) ) a :str = p.parent if root == path: a :List[str] = f a :Any = list(paths.values() ) if detail: return out else: return sorted(f['''name'''] for f in out )
94
1
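The wav2vec2-to-SpeechEncoderDecoder converter above is normally driven through argparse, but its final call shows the programmatic shape. A sketch of a direct invocation with the script's own defaults; every path below is a placeholder:

```python
# Sketch: calling the converter above directly instead of via argparse.
# All three paths are placeholders; the keyword arguments mirror the
# script's own argparse defaults.
convert_wavaveca_checkpoint(
    "path/to/fairseq_checkpoint.pt",   # checkpoint_path (placeholder)
    "path/to/output_dir",              # pytorch_dump_folder_path (placeholder)
    "path/to/dict.txt",                # dict_path (placeholder)
    encoder_config_path="facebook/wav2vec2-large-lv60",
    decoder_config_path="facebook/s2t-small-mustc-en-fr-st",
    vocab_size=10224,
    num_decoder_layers=7,
)
```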
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return a word's sorted-letter signature; anagrams share one signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the list sharing my_word's signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
94
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter snake_case : int = '''Create a default config file for Accelerate with only a few flags set.''' def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any]="no" , UpperCAmelCase_ : str = default_json_config_file , UpperCAmelCase_ : bool = False ): """simple docstring""" a :List[str] = Path(UpperCAmelCase_ ) path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) if path.exists(): print( F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' ) return False a :Optional[Any] = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' ) a :List[Any] = { '''compute_environment''': '''LOCAL_MACHINE''', '''mixed_precision''': mixed_precision, } if torch.cuda.is_available(): a :Dict = torch.cuda.device_count() a :Tuple = num_gpus a :int = False if num_gpus > 1: a :str = '''MULTI_GPU''' else: a :List[Any] = '''NO''' elif is_xpu_available() and use_xpu: a :List[Any] = torch.xpu.device_count() a :Optional[int] = num_xpus a :List[Any] = False if num_xpus > 1: a :int = '''MULTI_XPU''' else: a :str = '''NO''' elif is_npu_available(): a :List[str] = torch.npu.device_count() a :Any = num_npus a :Optional[int] = False if num_npus > 1: a :List[str] = '''MULTI_NPU''' else: a :Dict = '''NO''' else: a :str = 0 a :Optional[Any] = True a :Optional[Any] = 1 a :str = '''NO''' a :List[str] = ClusterConfig(**UpperCAmelCase_ ) config.to_json_file(UpperCAmelCase_ ) return path def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" a :List[Any] = parser.add_parser('''default''' , parents=UpperCAmelCase_ , help=UpperCAmelCase_ , formatter_class=UpperCAmelCase_ ) parser.add_argument( '''--config_file''' , default=UpperCAmelCase_ , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , dest='''save_location''' , ) parser.add_argument( '''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=UpperCAmelCase_ , help='''Whether or not to use mixed precision training. ''' '''Choose between FP16 and BF16 (bfloat16) training. 
''' '''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , ) parser.set_defaults(func=UpperCAmelCase_ ) return parser def __lowerCamelCase ( UpperCAmelCase_ : int ): """simple docstring""" a :Optional[Any] = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F'''accelerate configuration saved at {config_file}''' )
94
1
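The anagram finder above keys every word by its sorted-letter signature, so mutual anagrams collapse onto one dictionary entry. The same idea on an in-memory word list, with no `words.txt` dependency:

```python
# Self-contained sketch of the signature-based anagram grouping above,
# using a tiny in-memory word list instead of words.txt.
import collections

words = ["listen", "silent", "enlist", "google", "banana"]
by_signature = collections.defaultdict(list)
for w in words:
    by_signature["".join(sorted(w))].append(w)

anagram_groups = [group for group in by_signature.values() if len(group) > 1]
print(anagram_groups)  # [['listen', 'silent', 'enlist']]
```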
def binary_multiply(a: int, b: int) -> int:
    """Multiply two non-negative integers with the shift-and-add method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Multiply two integers modulo c, keeping every intermediate reduced."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
94
import sys snake_case : int = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def __lowerCamelCase ( UpperCAmelCase_ : str = N ): """simple docstring""" a :Optional[Any] = -sys.maxsize - 1 for i in range(len(UpperCAmelCase_ ) - 12 ): a :Dict = 1 for j in range(13 ): product *= int(n[i + j] ) if product > largest_product: a :str = product return largest_product if __name__ == "__main__": print(F"""{solution() = }""")
94
1
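`binary_multiply` above is shift-and-add (Russian peasant) multiplication: double `a`, halve `b`, and accumulate `a` whenever the low bit of `b` is set; the modular variant keeps every intermediate reduced. A quick cross-check against Python's `*`, using the names given to the fixed definitions above:

```python
# Cross-check the shift-and-add multiplier against the builtin
# operator for small non-negative inputs.
for a in range(20):
    for b in range(20):
        assert binary_multiply(a, b) == a * b

assert binary_mod_multiply(7, 13, 5) == (7 * 13) % 5  # == 1
```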
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo snake_case : Optional[Any] = '''\ @misc{wu2016googles, title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } ''' snake_case : Dict = '''\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the \'GLEU score\'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score\'s range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. ''' snake_case : Union[str, Any] = '''\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: \'google_bleu\': google_bleu score Examples: Example 1: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... 
\'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.44 Example 2: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.61 Example 3: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results["google_bleu"], 2)) 0.53 Example 4: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... 
\'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results["google_bleu"], 2)) 0.4 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _snake_case ( datasets.Metric ): def SCREAMING_SNAKE_CASE__ ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ), } ) , ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = 4 , ): return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_lowerCamelCase , hypotheses=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase ) }
94
import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any]="attention" ): """simple docstring""" a :Optional[int] = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel'''] a :Optional[Any] = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel'''] a :int = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel'''] a :Optional[Any] = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel'''] return k, o, q, v def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int=False ): """simple docstring""" if split_mlp_wi: a :int = params[F'''{prefix}/layers_{i}/mlp/wi_0/kernel'''] a :Optional[Any] = params[F'''{prefix}/layers_{i}/mlp/wi_1/kernel'''] a :Dict = (wi_a, wi_a) else: a :Optional[Any] = params[F'''{prefix}/layers_{i}/mlp/wi/kernel'''] a :Dict = params[F'''{prefix}/layers_{i}/mlp/wo/kernel'''] return wi, wo def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] ): """simple docstring""" return params[F'''{prefix}/layers_{i}/{layer_name}/scale'''] def __lowerCamelCase ( UpperCAmelCase_ : dict , *, UpperCAmelCase_ : int , UpperCAmelCase_ : bool ): """simple docstring""" a :str = traverse_util.flatten_dict(variables['''target'''] ) a :Any = {'''/'''.join(UpperCAmelCase_ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi a :Any = '''encoder/layers_0/mlp/wi_0/kernel''' in old print('''Split MLP:''' , UpperCAmelCase_ ) a :Optional[Any] = collections.OrderedDict() # Shared embeddings. a :Union[str, Any] = old['''token_embedder/embedding'''] # Encoder. for i in range(UpperCAmelCase_ ): # Block i, layer 0 (Self Attention). a :Optional[Any] = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , '''pre_attention_layer_norm''' ) a , a , a , a :Optional[int] = tax_attention_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , '''attention''' ) a :List[Any] = layer_norm a :str = k.T a :Dict = o.T a :int = q.T a :Optional[Any] = v.T # Block i, layer 1 (MLP). a :Tuple = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , '''pre_mlp_layer_norm''' ) a , a :List[Any] = tax_mlp_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , UpperCAmelCase_ ) a :Any = layer_norm if split_mlp_wi: a :Any = wi[0].T a :Tuple = wi[1].T else: a :List[str] = wi.T a :List[Any] = wo.T a :Union[str, Any] = old[ '''encoder/relpos_bias/rel_embedding''' ].T a :Optional[Any] = old['''encoder/encoder_norm/scale'''] if not is_encoder_only: # Decoder. for i in range(UpperCAmelCase_ ): # Block i, layer 0 (Self Attention). a :List[str] = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''pre_self_attention_layer_norm''' ) a , a , a , a :List[Any] = tax_attention_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''self_attention''' ) a :List[Any] = layer_norm a :Tuple = k.T a :int = o.T a :Any = q.T a :Optional[int] = v.T # Block i, layer 1 (Cross Attention). 
a :str = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''pre_cross_attention_layer_norm''' ) a , a , a , a :Any = tax_attention_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''encoder_decoder_attention''' ) a :str = layer_norm a :Optional[Any] = k.T a :Any = o.T a :Dict = q.T a :Optional[Any] = v.T # Block i, layer 2 (MLP). a :Optional[int] = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''pre_mlp_layer_norm''' ) a , a :List[Any] = tax_mlp_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , UpperCAmelCase_ ) a :Optional[int] = layer_norm if split_mlp_wi: a :int = wi[0].T a :Tuple = wi[1].T else: a :str = wi.T a :Dict = wo.T a :Any = old['''decoder/decoder_norm/scale'''] a :Optional[Any] = old[ '''decoder/relpos_bias/rel_embedding''' ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: a :Union[str, Any] = old['''decoder/logits_dense/kernel'''].T return new def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : bool ): """simple docstring""" a :List[Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: a :Optional[Any] = state_dict['''shared.weight'''] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: a :Tuple = state_dict['''shared.weight'''] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('''Using shared word embeddings as lm_head.''' ) a :Optional[Any] = state_dict['''shared.weight'''] return state_dict def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] ): """simple docstring""" a :Tuple = checkpoints.load_tax_checkpoint(UpperCAmelCase_ ) a :Optional[int] = convert_tax_to_pytorch(UpperCAmelCase_ , num_layers=config.num_layers , is_encoder_only=UpperCAmelCase_ ) a :Tuple = make_state_dict(UpperCAmelCase_ , UpperCAmelCase_ ) model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ ) def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ): """simple docstring""" a :List[Any] = TaConfig.from_json_file(UpperCAmelCase_ ) print(F'''Building PyTorch model from configuration: {config}''' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: a :Any = TaEncoderModel(UpperCAmelCase_ ) else: a :List[str] = TaForConditionalGeneration(UpperCAmelCase_ ) # Load weights from tf checkpoint load_tax_weights_in_ta(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(UpperCAmelCase_ ) # Verify that we can load the checkpoint. 
model.from_pretrained(UpperCAmelCase_ ) print('''Done''' ) if __name__ == "__main__": snake_case : Any = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) snake_case : Optional[Any] = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
94
1
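The `google_bleu` metric above is a thin wrapper around `nltk.translate.gleu_score.corpus_gleu`; calling the NLTK scorer directly shows the same computation without the `datasets` plumbing:

```python
# Call the underlying NLTK GLEU scorer directly, bypassing the
# datasets.Metric wrapper defined above.
from nltk.translate import gleu_score

hypothesis = ["the", "cat", "sat", "on", "the", "mat"]
reference = ["the", "cat", "is", "on", "the", "mat"]

score = gleu_score.corpus_gleu(
    list_of_references=[[reference]],
    hypotheses=[hypothesis],
    min_len=1,
    max_len=4,
)
print(round(score, 2))
```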
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Prepending in descending order leaves the list sorted ascending.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
94
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient over 2..limit, sieving primes and applying
    phi(n) = n * prod(1 - 1/p) over the prime divisors p of n."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
94
1
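The totient solution above relies on the multiplicative formula phi(n) = n * prod(1 - 1/p) over the distinct prime divisors p of n. For small n the formula can be cross-checked against a brute-force coprime count:

```python
# Cross-check Euler's totient against a brute-force coprime count.
from math import gcd


def phi_bruteforce(n: int) -> int:
    # Count integers in [1, n] that are coprime with n.
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)


# phi(1) = 1, phi(6) = 2, phi(9) = 6, phi(10) = 4
assert [phi_bruteforce(n) for n in (1, 6, 9, 10)] == [1, 2, 6, 4]
```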
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
94
snake_case : str = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' snake_case : List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] snake_case : int = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
94
1
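The ViT-MSN configuration class above only stores hyperparameters; constructing it with keyword overrides is how a smaller variant would be described. A sketch, assuming the corrected `ViTMSNConfig` class is importable:

```python
# Sketch: describe a smaller ViT-MSN variant by overriding defaults,
# assuming the corrected ViTMSNConfig class above is importable.
config = ViTMSNConfig(
    hidden_size=384,
    num_hidden_layers=6,
    num_attention_heads=6,
    intermediate_size=1536,
    image_size=96,
)
print(config.hidden_size, config.num_hidden_layers)  # 384 6
```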
import random
import timeit
from functools import wraps
from typing import Callable, Optional

from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
    Benchmark,
    Memory,
    MemorySummary,
    measure_peak_memory_cpu,
    start_memory_tracing,
    stop_memory_tracing,
)


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


class TensorFlowBenchmark(Benchmark):
    args: "TensorFlowBenchmarkArguments"
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
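# A minimal, hedged usage sketch for the benchmark class above. The argument
# fields used here (`models`, `batch_sizes`, `sequence_lengths`) follow the
# standard TensorFlowBenchmarkArguments API; adjust for your installed version.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32])
benchmark = TensorFlowBenchmark(args)
results = benchmark.run()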
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
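# A hedged usage sketch for the processor above. The checkpoint name is
# illustrative, and the zero waveform stands in for real 48 kHz audio.
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
inputs = processor(
    text=["the sound of a cat"],
    audios=np.zeros(48_000, dtype=np.float32),
    sampling_rate=48_000,
    return_tensors="pt",
)
print(inputs.keys())  # input_ids, attention_mask, input_features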
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
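# A hypothetical usage sketch for the legacy filesystem above; `repo_info`
# would normally come from the Hub API, and the dataset name is illustrative.
from huggingface_hub import HfApi

repo_info = HfApi().dataset_info("squad")
fs = HfFileSystem(repo_info=repo_info)
print(fs.ls(""))  # top-level files and directories of the repository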
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}


class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
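# A quick usage sketch of the tokenizer above; the checkpoint name comes from
# the pretrained map defined at the top of the file.
from transformers import XLMRobertaTokenizer

tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
ids = tokenizer("Hello world!")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))  # starts with <s> and ends with </s>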
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(n: int = 1000) -> int:
    """Returns the index of the first Fibonacci term with n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
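# Quick check of the solution above: F12 = 144 is the first Fibonacci term
# with three digits, so solution(3) should return index 12.
assert solution(3) == 12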
def triangle_number_generator():
    """Yields the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Counts divisors via prime factorization: d(n) = prod(multiplicity + 1)."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    """Returns the first triangle number with over five hundred divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
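# Quick check of the divisor-count routine above: 28 = 2^2 * 7, so it has
# (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28).
assert count_divisors(28) == 6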
from __future__ import annotations

import unittest

import numpy as np

from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.layoutlm.modeling_tf_layoutlm import (
        TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFLayoutLMForMaskedLM,
        TFLayoutLMForQuestionAnswering,
        TFLayoutLMForSequenceClassification,
        TFLayoutLMForTokenClassification,
        TFLayoutLMModel,
    )


class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass


def prepare_layoutlm_batch_inputs():
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on

    return input_ids, attention_mask, bbox, token_type_ids, labels


@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497, "b": 0.01492, "c": 0.02202, "d": 0.04253, "e": 0.11162, "f": 0.02228,
            "g": 0.02015, "h": 0.06094, "i": 0.07546, "j": 0.00153, "k": 0.01292, "l": 0.04025,
            "m": 0.02406, "n": 0.06749, "o": 0.07507, "p": 0.01929, "q": 0.00095, "r": 0.07587,
            "s": 0.06327, "t": 0.09356, "u": 0.02758, "v": 0.00978, "w": 0.02560, "x": 0.00150,
            "y": 0.01994, "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
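# Illustrative call of the solver above: "khoor zruog" is "hello world"
# Caesar-shifted by 3, which the chi-squared search should recover.
best_shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared("khoor zruog")
print(best_shift, decoded)  # expected: 3 hello world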
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")

    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
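# Sanity check: all three implementations above should agree with the
# `expect` list defined at the top of the file.
assert next_greatest_element_slow(arr) == expect
assert next_greatest_element_fast(arr) == expect
assert next_greatest_element(arr) == expect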
94
from __future__ import annotations def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : list[str] | None = None , UpperCAmelCase_ : dict[str, float] | None = None , UpperCAmelCase_ : bool = False , ): """simple docstring""" a :str = cipher_alphabet or [chr(UpperCAmelCase_ ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the English language (how much they show up) a :List[Any] = { '''a''': 0.08497, '''b''': 0.01492, '''c''': 0.02202, '''d''': 0.04253, '''e''': 0.11162, '''f''': 0.02228, '''g''': 0.02015, '''h''': 0.06094, '''i''': 0.07546, '''j''': 0.00153, '''k''': 0.01292, '''l''': 0.04025, '''m''': 0.02406, '''n''': 0.06749, '''o''': 0.07507, '''p''': 0.01929, '''q''': 0.00095, '''r''': 0.07587, '''s''': 0.06327, '''t''': 0.09356, '''u''': 0.02758, '''v''': 0.00978, '''w''': 0.02560, '''x''': 0.00150, '''y''': 0.01994, '''z''': 0.00077, } else: # Custom frequencies dictionary a :Dict = frequencies_dict if not case_sensitive: a :Union[str, Any] = ciphertext.lower() # Chi squared statistic values a :dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(UpperCAmelCase_ ) ): a :int = '''''' # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet a :Dict = (alphabet_letters.index(letter.lower() ) - shift) % len( UpperCAmelCase_ ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter a :List[Any] = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: a :Optional[int] = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message a :List[Any] = decrypted_with_shift.lower().count(UpperCAmelCase_ ) # Get the expected amount of times the letter should appear based # on letter frequencies a :Dict = frequencies[letter] * occurrences # Complete the chi squared statistic formula a :Any = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message a :int = decrypted_with_shift.count(UpperCAmelCase_ ) # Get the expected amount of times the letter should appear based # on letter frequencies a :Tuple = frequencies[letter] * occurrences # Complete the chi squared statistic formula a :Optional[Any] = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary a :Optional[Any] = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(UpperCAmelCase_ : int ) -> tuple[float, str]: return chi_squared_statistic_values[UpperCAmelCase_] a :int = min( UpperCAmelCase_ , key=UpperCAmelCase_ , ) # Get all the data from the most likely cipher (key, decoded message) ( ( a ) , ( a ) , ) :Optional[int] = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
94
1
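The stack-based next_greatest_element in the row above is the O(n) variant: scanning right to left, the stack keeps only values that could still be the next greater element of something further left. A readably-named sketch of the same technique:

def next_greater(arr: list) -> list:
    result = [-1] * len(arr)
    stack = []  # candidates that may still be the next-greater of an element to the left
    for i in range(len(arr) - 1, -1, -1):
        while stack and stack[-1] <= arr[i]:
            stack.pop()              # arr[i] hides smaller or equal candidates
        if stack:
            result[i] = stack[-1]    # nearest remaining candidate is the answer
        stack.append(arr[i])
    return result

assert next_greater([2, 7, 3, 5, 4, 6, 8]) == [7, 8, 5, 6, 6, 8, -1]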
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available snake_case : Union[str, Any] = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case : List[str] = ['''SpeechEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case : Optional[Any] = ['''FlaxSpeechEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys snake_case : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
94
from maths.prime_factors import prime_factors def __lowerCamelCase ( UpperCAmelCase_ : int ): """simple docstring""" if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): a :Dict = F'''Input value of [number={number}] must be an integer''' raise TypeError(UpperCAmelCase_ ) if number < 1: raise ValueError('''Input must be a positive integer''' ) return -1 if len(prime_factors(UpperCAmelCase_ ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
94
1
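The one-liner in the row above returns -1 when a number has an odd count of prime factors and 1 otherwise; assuming the imported prime_factors helper counts factors with multiplicity, that is the Liouville function λ(n). A self-contained sketch with trial division standing in for the import:

def liouville(n: int) -> int:
    # count prime factors with multiplicity by trial division
    count, d = 0, 2
    while d * d <= n:
        while n % d == 0:
            n //= d
            count += 1
        d += 1
    if n > 1:          # whatever is left over is itself prime
        count += 1
    return -1 if count % 2 else 1

assert liouville(12) == -1  # 12 = 2 * 2 * 3, three factors
assert liouville(10) == 1   # 10 = 2 * 5, two factors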
def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ): """simple docstring""" if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): raise ValueError('''iterations must be defined as integers''' ) if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not number >= 1: raise ValueError( '''starting number must be an integer and be more than 0''' ) if not iterations >= 1: raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' ) a :Optional[Any] = '''''' while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(UpperCAmelCase_ ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
94
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging snake_case : List[str] = logging.get_logger(__name__) snake_case : int = { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'blenderbot-small' SCREAMING_SNAKE_CASE__ = ['past_key_values'] SCREAMING_SNAKE_CASE__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , _lowerCamelCase=5_0265 , _lowerCamelCase=512 , _lowerCamelCase=8 , _lowerCamelCase=2048 , _lowerCamelCase=16 , _lowerCamelCase=8 , _lowerCamelCase=2048 , _lowerCamelCase=16 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="gelu" , _lowerCamelCase=512 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=1 , _lowerCamelCase=False , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=2 , **_lowerCamelCase , ): a :Dict = vocab_size a :Optional[Any] = max_position_embeddings a :str = d_model a :Any = encoder_ffn_dim a :Optional[int] = encoder_layers a :List[str] = encoder_attention_heads a :List[str] = decoder_ffn_dim a :Optional[int] = decoder_layers a :str = decoder_attention_heads a :List[str] = dropout a :Optional[int] = attention_dropout a :Dict = activation_dropout a :List[str] = activation_function a :List[Any] = init_std a :Optional[int] = encoder_layerdrop a :Tuple = decoder_layerdrop a :List[str] = use_cache a :int = encoder_layers a :Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , forced_eos_token_id=_lowerCamelCase , **_lowerCamelCase , ) class _snake_case ( _snake_case ): @property def SCREAMING_SNAKE_CASE__ ( self ): if self.task in ["default", "seq2seq-lm"]: a :Optional[Any] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: a :Union[str, Any] = {0: '''batch'''} a :Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: a :Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''} a :str = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(_lowerCamelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
a :Optional[int] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: a , a :str = self.num_layers for i in range(_lowerCamelCase ): a :List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} a :List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''} else: a :Optional[int] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def SCREAMING_SNAKE_CASE__ ( self ): if self.task in ["default", "seq2seq-lm"]: a :List[Any] = super().outputs else: a :Union[str, Any] = super(_lowerCamelCase , self ).outputs if self.use_past: a , a :int = self.num_layers for i in range(_lowerCamelCase ): a :int = {0: '''batch''', 2: '''past_sequence + sequence'''} a :Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): a :Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Generate decoder inputs a :Dict = seq_length if not self.use_past else 1 a :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) a :List[Any] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} a :List[str] = dict(**_lowerCamelCase , **_lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch a , a :Optional[Any] = common_inputs['''input_ids'''].shape a :Tuple = common_inputs['''decoder_input_ids'''].shape[1] a , a :List[Any] = self.num_attention_heads a :List[Any] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) a :int = decoder_seq_length + 3 a :Union[str, Any] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) a :Union[str, Any] = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase )] , dim=1 ) a :List[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered a , a :Optional[int] = self.num_layers a :str = min(_lowerCamelCase , _lowerCamelCase ) a :str = max(_lowerCamelCase , _lowerCamelCase ) - min_num_layers a :Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(_lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase ), ) ) # TODO: test this. 
a :int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(_lowerCamelCase , _lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): a :Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch a , a :Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values a :Optional[int] = seqlen + 2 a , a :Union[str, Any] = self.num_layers a , a :Optional[Any] = self.num_attention_heads a :str = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) a :Tuple = common_inputs['''attention_mask'''].dtype a :Any = torch.cat( [common_inputs['''attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase , dtype=_lowerCamelCase )] , dim=1 ) a :Any = [ (torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) for _ in range(_lowerCamelCase ) ] return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX a :Optional[Any] = compute_effective_axis_dimension( _lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX a :Optional[int] = tokenizer.num_special_tokens_to_add(_lowerCamelCase ) a :Tuple = compute_effective_axis_dimension( _lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence a :List[str] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size a :Dict = dict(tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase ) ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): if self.task in ["default", "seq2seq-lm"]: a :Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase ) elif self.task == "causal-lm": a :Dict = self._generate_dummy_inputs_for_causal_lm( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase ) else: a :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if self.task in ["default", "seq2seq-lm"]: a :Optional[int] = 
super()._flatten_past_key_values_(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else: a :Any = super(_lowerCamelCase , self )._flatten_past_key_values_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
94
1
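A compact rendering of the FizzBuzz rule used in the row above (multiples of 3 map to Fizz, multiples of 5 to Buzz, multiples of both to FizzBuzz); multiplying a string by a boolean is the only trick:

def fizzbuzz(n: int) -> str:
    out = "Fizz" * (n % 3 == 0) + "Buzz" * (n % 5 == 0)
    return out or str(n)   # fall back to the number when neither rule fires

assert fizzbuzz(9) == "Fizz"
assert fizzbuzz(10) == "Buzz"
assert fizzbuzz(15) == "FizzBuzz"
assert fizzbuzz(7) == "7"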
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging snake_case : List[str] = logging.get_logger(__name__) snake_case : int = { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'blenderbot-small' SCREAMING_SNAKE_CASE__ = ['past_key_values'] SCREAMING_SNAKE_CASE__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , _lowerCamelCase=5_0265 , _lowerCamelCase=512 , _lowerCamelCase=8 , _lowerCamelCase=2048 , _lowerCamelCase=16 , _lowerCamelCase=8 , _lowerCamelCase=2048 , _lowerCamelCase=16 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="gelu" , _lowerCamelCase=512 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=1 , _lowerCamelCase=False , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=2 , **_lowerCamelCase , ): a :Dict = vocab_size a :Optional[Any] = max_position_embeddings a :str = d_model a :Any = encoder_ffn_dim a :Optional[int] = encoder_layers a :List[str] = encoder_attention_heads a :List[str] = decoder_ffn_dim a :Optional[int] = decoder_layers a :str = decoder_attention_heads a :List[str] = dropout a :Optional[int] = attention_dropout a :Dict = activation_dropout a :List[str] = activation_function a :List[Any] = init_std a :Optional[int] = encoder_layerdrop a :Tuple = decoder_layerdrop a :List[str] = use_cache a :int = encoder_layers a :Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , forced_eos_token_id=_lowerCamelCase , **_lowerCamelCase , ) class _snake_case ( _snake_case ): @property def SCREAMING_SNAKE_CASE__ ( self ): if self.task in ["default", "seq2seq-lm"]: a :Optional[Any] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: a :Union[str, Any] = {0: '''batch'''} a :Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: a :Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''} a :str = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(_lowerCamelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
a :Optional[int] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: a , a :str = self.num_layers for i in range(_lowerCamelCase ): a :List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} a :List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''} else: a :Optional[int] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def SCREAMING_SNAKE_CASE__ ( self ): if self.task in ["default", "seq2seq-lm"]: a :List[Any] = super().outputs else: a :Union[str, Any] = super(_lowerCamelCase , self ).outputs if self.use_past: a , a :int = self.num_layers for i in range(_lowerCamelCase ): a :int = {0: '''batch''', 2: '''past_sequence + sequence'''} a :Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): a :Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Generate decoder inputs a :Dict = seq_length if not self.use_past else 1 a :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) a :List[Any] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} a :List[str] = dict(**_lowerCamelCase , **_lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch a , a :Optional[Any] = common_inputs['''input_ids'''].shape a :Tuple = common_inputs['''decoder_input_ids'''].shape[1] a , a :List[Any] = self.num_attention_heads a :List[Any] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) a :int = decoder_seq_length + 3 a :Union[str, Any] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) a :Union[str, Any] = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase )] , dim=1 ) a :List[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered a , a :Optional[int] = self.num_layers a :str = min(_lowerCamelCase , _lowerCamelCase ) a :str = max(_lowerCamelCase , _lowerCamelCase ) - min_num_layers a :Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(_lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase ), ) ) # TODO: test this. 
a :int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(_lowerCamelCase , _lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): a :Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch a , a :Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values a :Optional[int] = seqlen + 2 a , a :Union[str, Any] = self.num_layers a , a :Optional[Any] = self.num_attention_heads a :str = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) a :Tuple = common_inputs['''attention_mask'''].dtype a :Any = torch.cat( [common_inputs['''attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase , dtype=_lowerCamelCase )] , dim=1 ) a :Any = [ (torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) for _ in range(_lowerCamelCase ) ] return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX a :Optional[Any] = compute_effective_axis_dimension( _lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX a :Optional[int] = tokenizer.num_special_tokens_to_add(_lowerCamelCase ) a :Tuple = compute_effective_axis_dimension( _lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence a :List[str] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size a :Dict = dict(tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase ) ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): if self.task in ["default", "seq2seq-lm"]: a :Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase ) elif self.task == "causal-lm": a :Dict = self._generate_dummy_inputs_for_causal_lm( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase ) else: a :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if self.task in ["default", "seq2seq-lm"]: a :Optional[int] = 
super()._flatten_past_key_values_(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else: a :Any = super(_lowerCamelCase , self )._flatten_past_key_values_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
94
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers snake_case : Union[str, Any] = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('''dataclasses''') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('''importlib_metadata''') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=None ): """simple docstring""" require_version(deps[pkg] , UpperCAmelCase_ )
94
1
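The dependency-check module in the row above ultimately compares installed versions against pinned specifiers. A hedged sketch of that core comparison using the packaging library; the helper name check_requirement and the >=-only parsing are simplifications invented for the example, not the module's real API:

from importlib.metadata import version  # Python 3.8+
from packaging.specifiers import SpecifierSet

def check_requirement(req: str) -> None:
    # req looks like "tqdm>=4.27", mirroring entries in the deps table
    name, _, minimum = req.partition(">=")
    installed = version(name)
    if installed not in SpecifierSet(">=" + minimum):
        raise RuntimeError(f"{name} {installed} does not satisfy {req}")

check_requirement("packaging>=20.0")  # packaging is present if the import above worked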
import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(_snake_case ) , 'Tatoeba directory does not exist.' ) class _snake_case ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ ( self ): a :List[str] = tempfile.mkdtemp() return TatoebaConverter(save_dir=_lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self ): self.resolver.convert_models(['''heb-eng'''] ) @slow def SCREAMING_SNAKE_CASE__ ( self ): a , a :Dict = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=_lowerCamelCase ) assert mmeta["long_pair"] == "heb-eng"
94
from __future__ import annotations import collections import pprint from pathlib import Path def __lowerCamelCase ( UpperCAmelCase_ : str ): """simple docstring""" return "".join(sorted(UpperCAmelCase_ ) ) def __lowerCamelCase ( UpperCAmelCase_ : str ): """simple docstring""" return word_by_signature[signature(UpperCAmelCase_ )] snake_case : str = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''') snake_case : Optional[int] = sorted({word.strip().lower() for word in data.splitlines()}) snake_case : str = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": snake_case : Optional[int] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('''anagrams.txt''', '''w''') as file: file.write('''all_anagrams = \n ''') file.write(pprint.pformat(all_anagrams))
94
1
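The anagram finder in the row above keys every word by its sorted letters, so all anagrams of a word collide on one signature. A minimal sketch of the same grouping:

import collections

def signature(word: str) -> str:
    return "".join(sorted(word))

words = ["listen", "silent", "enlist", "google", "banana"]
by_signature = collections.defaultdict(list)
for w in words:
    by_signature[signature(w)].append(w)

assert by_signature[signature("listen")] == ["listen", "silent", "enlist"]
assert len(by_signature[signature("google")]) == 1  # no anagram partners here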
from __future__ import annotations from random import random class _snake_case : def __init__( self , _lowerCamelCase = None ): a :List[Any] = value a :int = random() a :Node | None = None a :Node | None = None def __repr__( self ): from pprint import pformat if self.left is None and self.right is None: return F'''\'{self.value}: {self.prior:.5}\'''' else: return pformat( {F'''{self.value}: {self.prior:.5}''': (self.left, self.right)} , indent=1 ) def __str__( self ): a :Union[str, Any] = str(self.value ) + ''' ''' a :Union[str, Any] = str(self.left or '''''' ) a :Optional[Any] = str(self.right or '''''' ) return value + left + right def __lowerCamelCase ( UpperCAmelCase_ : Node | None , UpperCAmelCase_ : int ): """simple docstring""" if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: a , a :int = split(root.left , UpperCAmelCase_ ) return left, root else: a , a :Tuple = split(root.right , UpperCAmelCase_ ) return root, right def __lowerCamelCase ( UpperCAmelCase_ : Node | None , UpperCAmelCase_ : Node | None ): """simple docstring""" if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: a :List[str] = merge(left.right , UpperCAmelCase_ ) return left else: a :str = merge(UpperCAmelCase_ , right.left ) return right def __lowerCamelCase ( UpperCAmelCase_ : Node | None , UpperCAmelCase_ : int ): """simple docstring""" a :Union[str, Any] = Node(UpperCAmelCase_ ) a , a :Dict = split(UpperCAmelCase_ , UpperCAmelCase_ ) return merge(merge(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ ) def __lowerCamelCase ( UpperCAmelCase_ : Node | None , UpperCAmelCase_ : int ): """simple docstring""" a , a :List[Any] = split(UpperCAmelCase_ , value - 1 ) a , a :Optional[int] = split(UpperCAmelCase_ , UpperCAmelCase_ ) return merge(UpperCAmelCase_ , UpperCAmelCase_ ) def __lowerCamelCase ( UpperCAmelCase_ : Node | None ): """simple docstring""" if not root: # None return else: inorder(root.left ) print(root.value , end=''',''' ) inorder(root.right ) def __lowerCamelCase ( UpperCAmelCase_ : Node | None , UpperCAmelCase_ : str ): """simple docstring""" for arg in args.split(): if arg[0] == "+": a :Dict = insert(UpperCAmelCase_ , int(arg[1:] ) ) elif arg[0] == "-": a :List[Any] = erase(UpperCAmelCase_ , int(arg[1:] ) ) else: print('''Unknown command''' ) return root def __lowerCamelCase ( ): """simple docstring""" a :int = None print( '''enter numbers to create a tree, + value to add value into treap, ''' '''- value to erase all nodes with value. \'q\' to quit. ''' ) a :Union[str, Any] = input() while args != "q": a :int = interact_treap(UpperCAmelCase_ , UpperCAmelCase_ ) print(UpperCAmelCase_ ) a :Optional[int] = input() print('''good bye!''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
94
import string import numpy def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ): """simple docstring""" return b if a == 0 else greatest_common_divisor(b % a , UpperCAmelCase_ ) class _snake_case : SCREAMING_SNAKE_CASE__ = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) SCREAMING_SNAKE_CASE__ = numpy.vectorize(lambda _snake_case : x % 36 ) SCREAMING_SNAKE_CASE__ = numpy.vectorize(_snake_case ) def __init__( self , _lowerCamelCase ): a :List[Any] = self.modulus(_lowerCamelCase ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key a :int = encrypt_key.shape[0] def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): return self.key_string.index(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): return self.key_string[round(_lowerCamelCase )] def SCREAMING_SNAKE_CASE__ ( self ): a :str = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: a :Any = det % len(self.key_string ) a :Dict = len(self.key_string ) if greatest_common_divisor(_lowerCamelCase , len(self.key_string ) ) != 1: a :int = ( F'''determinant modular {req_l} of encryption key({det}) ''' F'''is not co prime w.r.t {req_l}.\nTry another key.''' ) raise ValueError(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :Optional[Any] = [char for char in text.upper() if char in self.key_string] a :List[str] = chars[-1] while len(_lowerCamelCase ) % self.break_key != 0: chars.append(_lowerCamelCase ) return "".join(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :Dict = self.process_text(text.upper() ) a :List[str] = '''''' for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ): a :int = text[i : i + self.break_key] a :Optional[int] = [self.replace_letters(_lowerCamelCase ) for char in batch] a :Union[str, Any] = numpy.array([vec] ).T a :str = self.modulus(self.encrypt_key.dot(_lowerCamelCase ) ).T.tolist()[ 0 ] a :List[Any] = ''''''.join( self.replace_digits(_lowerCamelCase ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: a :int = det % len(self.key_string ) a :Tuple = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: a :Tuple = i break a :List[str] = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(_lowerCamelCase ) ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :List[Any] = self.make_decrypt_key() a :str = self.process_text(text.upper() ) a :List[Any] = '''''' for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ): a :Optional[Any] = text[i : i + self.break_key] a :List[Any] = [self.replace_letters(_lowerCamelCase ) for char in batch] a :str = numpy.array([vec] ).T a :Dict = self.modulus(decrypt_key.dot(_lowerCamelCase ) ).T.tolist()[0] a :List[Any] = ''''''.join( self.replace_digits(_lowerCamelCase ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def __lowerCamelCase ( ): """simple docstring""" a :Tuple = int(input('''Enter the order of the encryption key: ''' ) ) a :Dict = [] print('''Enter each row of the encryption key with space separated integers''' ) for _ in range(UpperCAmelCase_ ): a :List[str] = [int(UpperCAmelCase_ ) for x in 
input().split()] hill_matrix.append(UpperCAmelCase_ ) a :Any = HillCipher(numpy.array(UpperCAmelCase_ ) ) print('''Would you like to encrypt or decrypt some text? (1 or 2)''' ) a :Any = input('''\n1. Encrypt\n2. Decrypt\n''' ) if option == "1": a :str = input('''What text would you like to encrypt?: ''' ) print('''Your encrypted text is:''' ) print(hc.encrypt(UpperCAmelCase_ ) ) elif option == "2": a :Dict = input('''What text would you like to decrypt?: ''' ) print('''Your decrypted text is:''' ) print(hc.decrypt(UpperCAmelCase_ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
94
1
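The Hill cipher in the row above is modular matrix arithmetic: each plaintext block becomes a column vector that is multiplied by the key matrix modulo the alphabet size. A minimal 2x2 sketch over A-Z; the key matrix is the standard textbook example, not one taken from the snippet, and it is only valid because its determinant (9) is coprime to 26:

import numpy as np

KEY = np.array([[3, 3], [2, 5]])  # det = 3*5 - 3*2 = 9, gcd(9, 26) = 1

def encrypt_block(block: str) -> str:
    vec = np.array([[ord(c) - ord("A")] for c in block])  # letters -> 0..25
    out = KEY.dot(vec) % 26
    return "".join(chr(int(v) + ord("A")) for v in out.flatten())

# "HI" -> [7, 8] -> (3*7 + 3*8, 2*7 + 5*8) mod 26 = (19, 2) -> "TC"
assert encrypt_block("HI") == "TC"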
import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def __lowerCamelCase ( UpperCAmelCase_ : int ): """simple docstring""" if ( (cp >= 0x4_E00 and cp <= 0x9_FFF) or (cp >= 0x3_400 and cp <= 0x4_DBF) # or (cp >= 0x20_000 and cp <= 0x2A_6DF) # or (cp >= 0x2A_700 and cp <= 0x2B_73F) # or (cp >= 0x2B_740 and cp <= 0x2B_81F) # or (cp >= 0x2B_820 and cp <= 0x2C_EAF) # or (cp >= 0xF_900 and cp <= 0xF_AFF) or (cp >= 0x2F_800 and cp <= 0x2F_A1F) # ): # return True return False def __lowerCamelCase ( UpperCAmelCase_ : str ): """simple docstring""" for char in word: a :List[str] = ord(UpperCAmelCase_ ) if not _is_chinese_char(UpperCAmelCase_ ): return 0 return 1 def __lowerCamelCase ( UpperCAmelCase_ : List[str] ): """simple docstring""" a :Any = set() for token in tokens: a :str = len(UpperCAmelCase_ ) > 1 and is_chinese(UpperCAmelCase_ ) if chinese_word: word_set.add(UpperCAmelCase_ ) a :Dict = list(UpperCAmelCase_ ) return word_list def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : set() ): """simple docstring""" if not chinese_word_set: return bert_tokens a :List[str] = max([len(UpperCAmelCase_ ) for w in chinese_word_set] ) a :Union[str, Any] = bert_tokens a , a :Tuple = 0, len(UpperCAmelCase_ ) while start < end: a :Optional[Any] = True if is_chinese(bert_word[start] ): a :Optional[Any] = min(end - start , UpperCAmelCase_ ) for i in range(UpperCAmelCase_ , 1 , -1 ): a :Dict = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): a :Tuple = '''##''' + bert_word[j] a :Union[str, Any] = start + i a :Union[str, Any] = False break if single_word: start += 1 return bert_word def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : LTP , UpperCAmelCase_ : BertTokenizer ): """simple docstring""" a :List[str] = [] for i in range(0 , len(UpperCAmelCase_ ) , 100 ): a :Optional[int] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=['''cws'''] ).cws a :str = [get_chinese_word(UpperCAmelCase_ ) for r in res] ltp_res.extend(UpperCAmelCase_ ) assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ ) a :List[Any] = [] for i in range(0 , len(UpperCAmelCase_ ) , 100 ): a :Union[str, Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=512 ) bert_res.extend(res['''input_ids'''] ) assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ ) a :Any = [] for input_ids, chinese_word in zip(UpperCAmelCase_ , UpperCAmelCase_ ): a :int = [] for id in input_ids: a :Optional[Any] = bert_tokenizer._convert_id_to_token(UpperCAmelCase_ ) input_tokens.append(UpperCAmelCase_ ) a :int = add_sub_symbol(UpperCAmelCase_ , UpperCAmelCase_ ) a :str = [] # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word. 
for i, token in enumerate(UpperCAmelCase_ ): if token[:2] == "##": a :List[Any] = token[2:] # save chinese tokens' pos if len(UpperCAmelCase_ ) == 1 and _is_chinese_char(ord(UpperCAmelCase_ ) ): ref_id.append(UpperCAmelCase_ ) ref_ids.append(UpperCAmelCase_ ) assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ ) return ref_ids def __lowerCamelCase ( UpperCAmelCase_ : int ): """simple docstring""" with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: a :Dict = f.readlines() a :Dict = [line.strip() for line in data if len(UpperCAmelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' a :Any = LTP(args.ltp ) # faster in GPU device a :Optional[int] = BertTokenizer.from_pretrained(args.bert ) a :List[str] = prepare_ref(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: a :Union[str, Any] = [json.dumps(UpperCAmelCase_ ) + '''\n''' for ref in ref_ids] f.writelines(UpperCAmelCase_ ) if __name__ == "__main__": snake_case : Optional[Any] = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', required=False, type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', required=False, type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''', ) parser.add_argument( '''--bert''', required=False, type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''', ) parser.add_argument( '''--save_path''', required=False, type=str, default='''./resources/ref.txt''', help='''path to save res''', ) snake_case : Tuple = parser.parse_args() main(args)
94
from __future__ import annotations def __lowerCamelCase ( UpperCAmelCase_ : dict , UpperCAmelCase_ : str ): """simple docstring""" a , a :Optional[Any] = set(UpperCAmelCase_ ), [start] while stack: a :Optional[int] = stack.pop() explored.add(UpperCAmelCase_ ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(UpperCAmelCase_ ) return explored snake_case : Optional[int] = { '''A''': ['''B''', '''C''', '''D'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F'''], '''D''': ['''B''', '''D'''], '''E''': ['''B''', '''F'''], '''F''': ['''C''', '''E''', '''G'''], '''G''': ['''F'''], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, '''A'''))
94
1
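The iterative depth_first_search in the row above swaps recursion for an explicit stack; pushing neighbours in reversed order reproduces the left-to-right visit order of the recursive version. A sketch (this variant also skips nodes already seen at pop time, the detail the snippet handles with its `adj not in explored` check):

def dfs(graph: dict, start: str) -> list:
    seen, stack, order = set(), [start], []
    while stack:
        node = stack.pop()                    # LIFO pop is what makes this depth-first
        if node in seen:
            continue
        seen.add(node)
        order.append(node)
        stack.extend(reversed(graph[node]))   # reversed so graph[node][0] is visited first
    return order

g = {"A": ["B", "C"], "B": ["D"], "C": [], "D": []}
assert dfs(g, "A") == ["A", "B", "D", "C"]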
import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features snake_case : Optional[int] = logging.get_logger(__name__) snake_case : List[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) snake_case : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _snake_case : SCREAMING_SNAKE_CASE__ = field( default=_snake_case , metadata={'help': 'Model type selected in the list: ' + ', '.join(_snake_case )} ) SCREAMING_SNAKE_CASE__ = field( default=_snake_case , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} ) SCREAMING_SNAKE_CASE__ = field( default=128 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , ) SCREAMING_SNAKE_CASE__ = field( default=64 , metadata={ 'help': ( 'The maximum number of tokens for the question. Questions longer than this will ' 'be truncated to this length.' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=30 , metadata={ 'help': ( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' 
) } , ) SCREAMING_SNAKE_CASE__ = field( default=_snake_case , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) SCREAMING_SNAKE_CASE__ = field( default=_snake_case , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} ) SCREAMING_SNAKE_CASE__ = field( default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) SCREAMING_SNAKE_CASE__ = field( default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) SCREAMING_SNAKE_CASE__ = field( default=0 , metadata={ 'help': ( 'language id of input for language-specific xlm models (see' ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)' ) } , ) SCREAMING_SNAKE_CASE__ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} ) class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'train' SCREAMING_SNAKE_CASE__ = 'dev' class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = Split.train , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = "pt" , ): a :List[Any] = args a :Any = is_language_sensitive a :int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(_lowerCamelCase , _lowerCamelCase ): try: a :str = Split[mode] except KeyError: raise KeyError('''mode is not a valid split name''' ) a :Optional[int] = mode # Load data features from cache or dataset file a :Any = '''v2''' if args.version_2_with_negative else '''v1''' a :Optional[Any] = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. a :Optional[Any] = cached_features_file + '''.lock''' with FileLock(_lowerCamelCase ): if os.path.exists(_lowerCamelCase ) and not args.overwrite_cache: a :List[Any] = time.time() a :List[Any] = torch.load(_lowerCamelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. 
a :int = self.old_features['''features'''] a :Optional[Any] = self.old_features.get('''dataset''' , _lowerCamelCase ) a :int = self.old_features.get('''examples''' , _lowerCamelCase ) logger.info( F'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ''' future run''' ) else: if mode == Split.dev: a :Tuple = self.processor.get_dev_examples(args.data_dir ) else: a :Optional[Any] = self.processor.get_train_examples(args.data_dir ) a , a :Union[str, Any] = squad_convert_examples_to_features( examples=self.examples , tokenizer=_lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_lowerCamelCase , ) a :Optional[Any] = time.time() torch.save( {'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples} , _lowerCamelCase , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ): return len(self.features ) def __getitem__( self , _lowerCamelCase ): # Convert to Tensors and build dataset a :List[Any] = self.features[i] a :str = torch.tensor(feature.input_ids , dtype=torch.long ) a :Dict = torch.tensor(feature.attention_mask , dtype=torch.long ) a :List[Any] = torch.tensor(feature.token_type_ids , dtype=torch.long ) a :Dict = torch.tensor(feature.cls_index , dtype=torch.long ) a :Any = torch.tensor(feature.p_mask , dtype=torch.float ) a :Any = torch.tensor(feature.is_impossible , dtype=torch.float ) a :Union[str, Any] = { '''input_ids''': input_ids, '''attention_mask''': attention_mask, '''token_type_ids''': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} ) if self.args.version_2_with_negative: inputs.update({'''is_impossible''': is_impossible} ) if self.is_language_sensitive: inputs.update({'''langs''': (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: a :int = torch.tensor(feature.start_position , dtype=torch.long ) a :Dict = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} ) return inputs
94
import math class _snake_case : def __init__( self , _lowerCamelCase=0 ): # a graph with Node 0,1,...,N-1 a :Optional[int] = n a :Union[str, Any] = [ [math.inf for j in range(0 , _lowerCamelCase )] for i in range(0 , _lowerCamelCase ) ] # adjacency matrix for weight a :List[Any] = [ [math.inf for j in range(0 , _lowerCamelCase )] for i in range(0 , _lowerCamelCase ) ] # dp[i][j] stores minimum distance from i to j def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :Tuple = w def SCREAMING_SNAKE_CASE__ ( self ): for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): a :Union[str, Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ): return self.dp[u][v] if __name__ == "__main__": snake_case : str = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
94
1
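The floyd_warshall method in the row above relaxes every pair (i, j) through every intermediate vertex k: dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j]). A minimal worked example on a 3-node directed graph:

import math

INF = math.inf
dist = [
    [0, 4, INF],   # edges: 0 -> 1 (4)
    [INF, 0, 1],   #        1 -> 2 (1)
    [2, INF, 0],   #        2 -> 0 (2)
]

n = len(dist)
for k in range(n):
    for i in range(n):
        for j in range(n):
            dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])

assert dist[0][2] == 5  # 0 -> 1 -> 2 costs 4 + 1; there is no direct edge
assert dist[1][0] == 3  # 1 -> 2 -> 0 costs 1 + 2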
from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case : Optional[int] = logging.get_logger(__name__) snake_case : Optional[int] = { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''', } class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'gpt_neox_japanese' def __init__( self , _lowerCamelCase=3_2000 , _lowerCamelCase=2560 , _lowerCamelCase=32 , _lowerCamelCase=32 , _lowerCamelCase=4 , _lowerCamelCase="gelu" , _lowerCamelCase=1.00 , _lowerCamelCase=1_0000 , _lowerCamelCase=2048 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-5 , _lowerCamelCase=True , _lowerCamelCase=3_1996 , _lowerCamelCase=3_1999 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , **_lowerCamelCase , ): super().__init__(bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase ) a :Optional[Any] = vocab_size a :int = max_position_embeddings a :Optional[int] = hidden_size a :Optional[Any] = num_hidden_layers a :Any = num_attention_heads a :Any = intermediate_multiple_size a :Optional[int] = hidden_act a :Tuple = rotary_pct a :Optional[int] = rotary_emb_base a :Any = initializer_range a :List[str] = layer_norm_eps a :List[str] = use_cache a :Tuple = attention_dropout a :List[str] = hidden_dropout
94
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name snake_case : Union[str, Any] = ''' Examples: ```py >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") >>> pipe_prior.to("cuda") >>> prompt = "red cat, 4k photo" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> zero_image_emb = out.negative_image_embeds >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") >>> pipe.to("cuda") >>> image = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=50, ... ).images >>> image[0].save("cat.png") ``` ''' def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str]=8 ): """simple docstring""" a :List[str] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 a :int = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class _snake_case ( _snake_case ): def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ): super().__init__() self.register_modules( unet=_lowerCamelCase , scheduler=_lowerCamelCase , movq=_lowerCamelCase , ) a :Any = 2 ** (len(self.movq.config.block_out_channels ) - 1) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if latents is None: a :str = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase , device=_lowerCamelCase , dtype=_lowerCamelCase ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) a :Any = latents.to(_lowerCamelCase ) a :Dict = latents * scheduler.init_noise_sigma return latents def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) a :int = torch.device(F'''cuda:{gpu_id}''' ) a :int = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=0 ): if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) a :Any = torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=_lowerCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) a :Tuple = None for cpu_offloaded_model in [self.unet, self.movq]: a , a :List[str] = cpu_offload_with_hook(_lowerCamelCase , _lowerCamelCase , prev_module_hook=_lowerCamelCase ) # We'll offload the last model manually. 
a :str = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def SCREAMING_SNAKE_CASE__ ( self ): if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(_lowerCamelCase , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_lowerCamelCase ) def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 100 , _lowerCamelCase = 4.0 , _lowerCamelCase = 1 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , ): a :int = self._execution_device a :Optional[Any] = guidance_scale > 1.0 if isinstance(_lowerCamelCase , _lowerCamelCase ): a :Union[str, Any] = torch.cat(_lowerCamelCase , dim=0 ) a :Any = image_embeds.shape[0] * num_images_per_prompt if isinstance(_lowerCamelCase , _lowerCamelCase ): a :List[str] = torch.cat(_lowerCamelCase , dim=0 ) if do_classifier_free_guidance: a :Union[str, Any] = image_embeds.repeat_interleave(_lowerCamelCase , dim=0 ) a :Optional[int] = negative_image_embeds.repeat_interleave(_lowerCamelCase , dim=0 ) a :Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCamelCase ) self.scheduler.set_timesteps(_lowerCamelCase , device=_lowerCamelCase ) a :Optional[Any] = self.scheduler.timesteps a :List[str] = self.unet.config.in_channels a , a :str = downscale_height_and_width(_lowerCamelCase , _lowerCamelCase , self.movq_scale_factor ) # create initial latent a :int = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(_lowerCamelCase ) ): # expand the latents if we are doing classifier free guidance a :Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a :Union[str, Any] = {'''image_embeds''': image_embeds} a :Optional[Any] = self.unet( sample=_lowerCamelCase , timestep=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , added_cond_kwargs=_lowerCamelCase , return_dict=_lowerCamelCase , )[0] if do_classifier_free_guidance: a , a :Any = noise_pred.split(latents.shape[1] , dim=1 ) a , a :List[str] = noise_pred.chunk(2 ) a , a :int = variance_pred.chunk(2 ) a :List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) a :Optional[int] = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): a , a :Tuple = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 a :int = self.scheduler.step( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase , )[0] # post-processing a :int = self.movq.decode(_lowerCamelCase , force_not_quantize=_lowerCamelCase )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: a :str = image * 0.5 + 0.5 a :List[Any] = image.clamp(0 , 1 ) a :str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if 
output_type == "pil": a :str = self.numpy_to_pil(_lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_lowerCamelCase )
94
1
import random from typing import Any def __lowerCamelCase ( UpperCAmelCase_ : list ): """simple docstring""" for _ in range(len(UpperCAmelCase_ ) ): a :str = random.randint(0 , len(UpperCAmelCase_ ) - 1 ) a :Dict = random.randint(0 , len(UpperCAmelCase_ ) - 1 ) a , a :str = data[b], data[a] return data if __name__ == "__main__": snake_case : List[str] = [0, 1, 2, 3, 4, 5, 6, 7] snake_case : List[Any] = ['''python''', '''says''', '''hello''', '''!'''] print('''Fisher-Yates Shuffle:''') print('''List''', integers, strings) print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
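Note that, despite the printed banner, the loop above applies random pairwise transpositions rather than the Fisher-Yates algorithm, and a fixed number of uniform random swaps does not in general produce all permutations with equal probability. For comparison, a minimal sketch of the textbook in-place Fisher-Yates shuffle:

```py
import random


def fisher_yates_shuffle(data: list) -> list:
    # Walk from the last index down to 1, swapping each element with a
    # uniformly chosen element at or before it; every permutation is
    # then equally likely.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data


print(fisher_yates_shuffle([0, 1, 2, 3, 4, 5, 6, 7]))
```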
94
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = '' SCREAMING_SNAKE_CASE__ = 'hf-legacy' # "hf://"" is reserved for hffs def __init__( self , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ): super().__init__(self , **_lowerCamelCase ) a :Union[str, Any] = repo_info a :int = token a :int = None def SCREAMING_SNAKE_CASE__ ( self ): if self.dir_cache is None: a :Dict = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes a :List[Any] = { '''name''': hf_file.rfilename, '''size''': None, '''type''': '''file''', } self.dir_cache.update( { str(_lowerCamelCase ): {'''name''': str(_lowerCamelCase ), '''size''': None, '''type''': '''directory'''} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = "rb" , **_lowerCamelCase , ): if not isinstance(self.repo_info , _lowerCamelCase ): raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' ) a :Optional[int] = hf_hub_url(self.repo_info.id , _lowerCamelCase , revision=self.repo_info.sha ) return fsspec.open( _lowerCamelCase , mode=_lowerCamelCase , headers=get_authentication_headers_for_url(_lowerCamelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open() def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , **_lowerCamelCase ): self._get_dirs() a :Union[str, Any] = self._strip_protocol(_lowerCamelCase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase=False , **_lowerCamelCase ): self._get_dirs() a :str = PurePosixPath(path.strip('''/''' ) ) a :Tuple = {} for p, f in self.dir_cache.items(): a :Optional[int] = PurePosixPath(p.strip('''/''' ) ) a :str = p.parent if root == path: a :List[str] = f a :Any = list(paths.values() ) if detail: return out else: return sorted(f['''name'''] for f in out )
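The `_get_dirs` cache above synthesizes directory entries from the repo's flat file listing by walking each filename's `PurePosixPath` parents (the `[:-1]` drops the root `.`). The same trick in isolation, with plain strings standing in for the `hf_file.rfilename` siblings:

```py
from pathlib import PurePosixPath

filenames = ["data/train/part-0.parquet", "README.md"]
dir_cache = {}
for name in filenames:
    dir_cache[name] = {"name": name, "size": None, "type": "file"}
    # Every non-root parent of a file becomes a directory entry.
    for d in list(PurePosixPath(name).parents)[:-1]:
        dir_cache[str(d)] = {"name": str(d), "size": None, "type": "directory"}

print(sorted(dir_cache))
# ['README.md', 'data', 'data/train', 'data/train/part-0.parquet']
```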
94
1
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers snake_case : Union[str, Any] = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('''dataclasses''') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('''importlib_metadata''') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=None ): """simple docstring""" require_version(deps[pkg] , UpperCAmelCase_ )
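The loop above verifies installed versions against the pins in `deps` at import time. A rough sketch of the same check built directly on `importlib.metadata` and `packaging` (an illustration of the idea, not the actual `require_version_core` implementation):

```py
from importlib.metadata import version

from packaging.specifiers import SpecifierSet


def check_dep(name: str, spec: str) -> None:
    # Compare the installed distribution version against a PEP 440 specifier.
    installed = version(name)
    if installed not in SpecifierSet(spec):
        raise ImportError(f"{name}{spec} is required, but found {name}=={installed}")


check_dep("packaging", ">=20.0")
```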
94
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter snake_case : int = '''Create a default config file for Accelerate with only a few flags set.''' def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any]="no" , UpperCAmelCase_ : str = default_json_config_file , UpperCAmelCase_ : bool = False ): """simple docstring""" a :List[str] = Path(UpperCAmelCase_ ) path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) if path.exists(): print( F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' ) return False a :Optional[Any] = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' ) a :List[Any] = { '''compute_environment''': '''LOCAL_MACHINE''', '''mixed_precision''': mixed_precision, } if torch.cuda.is_available(): a :Dict = torch.cuda.device_count() a :Tuple = num_gpus a :int = False if num_gpus > 1: a :str = '''MULTI_GPU''' else: a :List[Any] = '''NO''' elif is_xpu_available() and use_xpu: a :List[Any] = torch.xpu.device_count() a :Optional[int] = num_xpus a :List[Any] = False if num_xpus > 1: a :int = '''MULTI_XPU''' else: a :str = '''NO''' elif is_npu_available(): a :List[str] = torch.npu.device_count() a :Any = num_npus a :Optional[int] = False if num_npus > 1: a :List[str] = '''MULTI_NPU''' else: a :Dict = '''NO''' else: a :str = 0 a :Optional[Any] = True a :Optional[Any] = 1 a :str = '''NO''' a :List[str] = ClusterConfig(**UpperCAmelCase_ ) config.to_json_file(UpperCAmelCase_ ) return path def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" a :List[Any] = parser.add_parser('''default''' , parents=UpperCAmelCase_ , help=UpperCAmelCase_ , formatter_class=UpperCAmelCase_ ) parser.add_argument( '''--config_file''' , default=UpperCAmelCase_ , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , dest='''save_location''' , ) parser.add_argument( '''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=UpperCAmelCase_ , help='''Whether or not to use mixed precision training. ''' '''Choose between FP16 and BF16 (bfloat16) training. 
''' '''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , ) parser.set_defaults(func=UpperCAmelCase_ ) return parser def __lowerCamelCase ( UpperCAmelCase_ : int ): """simple docstring""" a :Optional[Any] = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F'''accelerate configuration saved at {config_file}''' )
94
1
from __future__ import annotations from math import pow, sqrt def __lowerCamelCase ( UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : float ): """simple docstring""" if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if resistance == 0: return {"resistance": sqrt(pow(UpperCAmelCase_ , 2 ) - pow(UpperCAmelCase_ , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(UpperCAmelCase_ , 2 ) - pow(UpperCAmelCase_ , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(UpperCAmelCase_ , 2 ) + pow(UpperCAmelCase_ , 2 ) )} else: raise ValueError('''One and only one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
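All three branches above are rearrangements of the series-circuit relation Z² = R² + X². A quick worked check using a 3-4-5 triangle:

```py
from math import sqrt

resistance, reactance = 3.0, 4.0
impedance = sqrt(resistance**2 + reactance**2)  # Z = sqrt(R**2 + X**2)
assert impedance == 5.0
# Solving back for a missing side matches the function's subtraction branches.
assert sqrt(impedance**2 - reactance**2) == resistance
```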
94
import sys snake_case : int = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def __lowerCamelCase ( UpperCAmelCase_ : str = N ): """simple docstring""" a :Optional[Any] = -sys.maxsize - 1 for i in range(len(UpperCAmelCase_ ) - 12 ): a :Dict = 1 for j in range(13 ): product *= int(n[i + j] ) if product > largest_product: a :str = product return largest_product if __name__ == "__main__": print(F"""{solution() = }""")
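The solver above recomputes the full 13-digit product at every window position. An equivalent, more compact brute-force scan using `math.prod`, checked on a short illustrative digit string:

```py
from math import prod


def largest_window_product(digits: str, window: int = 13) -> int:
    # Brute force: take the product of every contiguous window of digits.
    return max(
        prod(int(d) for d in digits[i : i + window])
        for i in range(len(digits) - window + 1)
    )


assert largest_window_product("3675356291", 5) == 3150  # the window "67535"
```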
94
1
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin snake_case : Dict = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class _snake_case ( _snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizer SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizerFast SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = True def SCREAMING_SNAKE_CASE__ ( self ): super().setUp() # We have a SentencePiece fixture for testing a :Dict = XLMRobertaTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self ): a :Union[str, Any] = '''<pad>''' a :Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(_lowerCamelCase ) , 1002 ) def SCREAMING_SNAKE_CASE__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1002 ) def SCREAMING_SNAKE_CASE__ ( self ): a :Dict = XLMRobertaTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) a :str = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowerCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a :Any = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) a :List[str] = tokenizer.convert_tokens_to_ids(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) a :int = tokenizer.convert_ids_to_tokens(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def SCREAMING_SNAKE_CASE__ ( self ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return a :Dict = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in 
self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a :List[str] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase ) a :Optional[Any] = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase ) a :Any = tempfile.mkdtemp() a :int = tokenizer_r.save_pretrained(_lowerCamelCase ) a :List[Any] = tokenizer_p.save_pretrained(_lowerCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) a :Union[str, Any] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_lowerCamelCase , _lowerCamelCase ) # Checks everything loads correctly in the same way a :int = tokenizer_r.from_pretrained(_lowerCamelCase ) a :Union[str, Any] = tokenizer_p.from_pretrained(_lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_lowerCamelCase ) # Save tokenizer rust, legacy_format=True a :Optional[int] = tempfile.mkdtemp() a :Any = tokenizer_r.save_pretrained(_lowerCamelCase , legacy_format=_lowerCamelCase ) a :List[str] = tokenizer_p.save_pretrained(_lowerCamelCase ) # Checks it save with the same files self.assertSequenceEqual(_lowerCamelCase , _lowerCamelCase ) # Checks everything loads correctly in the same way a :str = tokenizer_r.from_pretrained(_lowerCamelCase ) a :Tuple = tokenizer_p.from_pretrained(_lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) ) shutil.rmtree(_lowerCamelCase ) # Save tokenizer rust, legacy_format=False a :str = tempfile.mkdtemp() a :int = tokenizer_r.save_pretrained(_lowerCamelCase , legacy_format=_lowerCamelCase ) a :Any = tokenizer_p.save_pretrained(_lowerCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way a :Union[str, Any] = tokenizer_r.from_pretrained(_lowerCamelCase ) a :List[str] = tokenizer_p.from_pretrained(_lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) ) shutil.rmtree(_lowerCamelCase ) @cached_property def SCREAMING_SNAKE_CASE__ ( self ): return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def SCREAMING_SNAKE_CASE__ ( self ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(_lowerCamelCase , f.name ) a :Union[str, Any] = XLMRobertaTokenizer(f.name , keep_accents=_lowerCamelCase ) a :Optional[Any] = pickle.dumps(_lowerCamelCase ) pickle.loads(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): if not self.test_rust_tokenizer: return a :Union[str, Any] = self.get_tokenizer() a :Dict = self.get_rust_tokenizer() a :Any = '''I was born in 92000, and this is falsé.''' a :int = tokenizer.tokenize(_lowerCamelCase ) a :Dict = rust_tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) a :Dict = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) a :Union[str, 
Any] = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) a :Union[str, Any] = self.get_rust_tokenizer() a :Tuple = tokenizer.encode(_lowerCamelCase ) a :Union[str, Any] = rust_tokenizer.encode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[int] = '''Hello World!''' a :Optional[Any] = [0, 3_5378, 6661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) ) @slow def SCREAMING_SNAKE_CASE__ ( self ): a :int = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) a :List[Any] = [ 0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 17_9459, 12_4850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780, 705, 15, 1388, 44, 378, 1_0114, 711, 152, 20, 6, 5, 2_2376, 642, 1221, 1_5190, 3_4153, 450, 5608, 959, 1119, 5_7702, 136, 186, 47, 1098, 2_9367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6044, 237, 6284, 5_0901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) ) @slow def SCREAMING_SNAKE_CASE__ ( self ): # fmt: off a :Optional[Any] = {'''input_ids''': [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
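The slow/fast comparisons above lean on the identity that `encode(text, add_special_tokens=False)` equals `convert_tokens_to_ids(tokenize(text))`. A minimal round-trip sketch with the same checkpoint the integration tests use (downloading it requires network access):

```py
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("xlm-roberta-base")
text = "I was born in 92000, and this is falsé."
tokens = tok.tokenize(text)
ids = tok.convert_tokens_to_ids(tokens)
# Without special tokens, encode() is exactly tokenize() + convert_tokens_to_ids().
assert ids == tok.encode(text, add_special_tokens=False)
```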
94
import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any]="attention" ): """simple docstring""" a :Optional[int] = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel'''] a :Optional[Any] = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel'''] a :int = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel'''] a :Optional[Any] = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel'''] return k, o, q, v def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int=False ): """simple docstring""" if split_mlp_wi: a :int = params[F'''{prefix}/layers_{i}/mlp/wi_0/kernel'''] a :Optional[Any] = params[F'''{prefix}/layers_{i}/mlp/wi_1/kernel'''] a :Dict = (wi_a, wi_a) else: a :Optional[Any] = params[F'''{prefix}/layers_{i}/mlp/wi/kernel'''] a :Dict = params[F'''{prefix}/layers_{i}/mlp/wo/kernel'''] return wi, wo def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] ): """simple docstring""" return params[F'''{prefix}/layers_{i}/{layer_name}/scale'''] def __lowerCamelCase ( UpperCAmelCase_ : dict , *, UpperCAmelCase_ : int , UpperCAmelCase_ : bool ): """simple docstring""" a :str = traverse_util.flatten_dict(variables['''target'''] ) a :Any = {'''/'''.join(UpperCAmelCase_ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi a :Any = '''encoder/layers_0/mlp/wi_0/kernel''' in old print('''Split MLP:''' , UpperCAmelCase_ ) a :Optional[Any] = collections.OrderedDict() # Shared embeddings. a :Union[str, Any] = old['''token_embedder/embedding'''] # Encoder. for i in range(UpperCAmelCase_ ): # Block i, layer 0 (Self Attention). a :Optional[Any] = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , '''pre_attention_layer_norm''' ) a , a , a , a :Optional[int] = tax_attention_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , '''attention''' ) a :List[Any] = layer_norm a :str = k.T a :Dict = o.T a :int = q.T a :Optional[Any] = v.T # Block i, layer 1 (MLP). a :Tuple = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , '''pre_mlp_layer_norm''' ) a , a :List[Any] = tax_mlp_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , UpperCAmelCase_ ) a :Any = layer_norm if split_mlp_wi: a :Any = wi[0].T a :Tuple = wi[1].T else: a :List[str] = wi.T a :List[Any] = wo.T a :Union[str, Any] = old[ '''encoder/relpos_bias/rel_embedding''' ].T a :Optional[Any] = old['''encoder/encoder_norm/scale'''] if not is_encoder_only: # Decoder. for i in range(UpperCAmelCase_ ): # Block i, layer 0 (Self Attention). a :List[str] = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''pre_self_attention_layer_norm''' ) a , a , a , a :List[Any] = tax_attention_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''self_attention''' ) a :List[Any] = layer_norm a :Tuple = k.T a :int = o.T a :Any = q.T a :Optional[int] = v.T # Block i, layer 1 (Cross Attention). 
a :str = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''pre_cross_attention_layer_norm''' ) a , a , a , a :Any = tax_attention_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''encoder_decoder_attention''' ) a :str = layer_norm a :Optional[Any] = k.T a :Any = o.T a :Dict = q.T a :Optional[Any] = v.T # Block i, layer 2 (MLP). a :Optional[int] = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''pre_mlp_layer_norm''' ) a , a :List[Any] = tax_mlp_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , UpperCAmelCase_ ) a :Optional[int] = layer_norm if split_mlp_wi: a :int = wi[0].T a :Tuple = wi[1].T else: a :str = wi.T a :Dict = wo.T a :Any = old['''decoder/decoder_norm/scale'''] a :Optional[Any] = old[ '''decoder/relpos_bias/rel_embedding''' ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: a :Union[str, Any] = old['''decoder/logits_dense/kernel'''].T return new def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : bool ): """simple docstring""" a :List[Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: a :Optional[Any] = state_dict['''shared.weight'''] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: a :Tuple = state_dict['''shared.weight'''] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('''Using shared word embeddings as lm_head.''' ) a :Optional[Any] = state_dict['''shared.weight'''] return state_dict def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] ): """simple docstring""" a :Tuple = checkpoints.load_tax_checkpoint(UpperCAmelCase_ ) a :Optional[int] = convert_tax_to_pytorch(UpperCAmelCase_ , num_layers=config.num_layers , is_encoder_only=UpperCAmelCase_ ) a :Tuple = make_state_dict(UpperCAmelCase_ , UpperCAmelCase_ ) model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ ) def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ): """simple docstring""" a :List[Any] = TaConfig.from_json_file(UpperCAmelCase_ ) print(F'''Building PyTorch model from configuration: {config}''' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: a :Any = TaEncoderModel(UpperCAmelCase_ ) else: a :List[str] = TaForConditionalGeneration(UpperCAmelCase_ ) # Load weights from tf checkpoint load_tax_weights_in_ta(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(UpperCAmelCase_ ) # Verify that we can load the checkpoint. 
model.from_pretrained(UpperCAmelCase_ ) print('''Done''' ) if __name__ == "__main__": snake_case : Any = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) snake_case : Optional[Any] = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
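Most of the conversion above is transposition: T5X/Flax stores dense kernels as `(in_features, out_features)`, while PyTorch `nn.Linear` keeps weights as `(out_features, in_features)`, hence the `.T` applied to every kernel. A tiny illustration of the layout difference:

```py
import torch

# A Flax/T5X dense kernel is laid out as (in_features, out_features)...
flax_kernel = torch.randn(512, 2048)
linear = torch.nn.Linear(512, 2048, bias=False)
# ...so it must be transposed into PyTorch's (out_features, in_features) layout.
linear.weight.data = flax_kernel.T
assert linear.weight.shape == (2048, 512)
```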
94
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) snake_case : Union[str, Any] = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case : Optional[Any] = [ '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OPTForCausalLM''', '''OPTModel''', '''OPTPreTrainedModel''', '''OPTForSequenceClassification''', '''OPTForQuestionAnswering''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case : List[Any] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case : Any = [ '''FlaxOPTForCausalLM''', '''FlaxOPTModel''', '''FlaxOPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys snake_case : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
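The `_import_structure`/`_LazyModule` pattern above defers the heavy torch/TF/Flax imports until one of the exported names is actually accessed. A rough sketch of the underlying idea using a module-level `__getattr__` (PEP 562), not the actual `_LazyModule` implementation:

```py
import importlib

_import_structure = {"json": ["dumps", "loads"]}


def __getattr__(name):
    # Import the owning module lazily the first time one of its names is used.
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)
```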
94
def __lowerCamelCase ( UpperCAmelCase_ : int = 100_0000 ): """simple docstring""" a :Any = set(range(3 , UpperCAmelCase_ , 2 ) ) primes.add(2 ) for p in range(3 , UpperCAmelCase_ , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , UpperCAmelCase_ , UpperCAmelCase_ ) ) ) a :Union[str, Any] = [float(UpperCAmelCase_ ) for n in range(limit + 1 )] for p in primes: for n in range(UpperCAmelCase_ , limit + 1 , UpperCAmelCase_ ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(F"""{solution() = }""")
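The sieve above evaluates φ(n) = n·Π(1 − 1/p) with float multiplications and sums φ(n) for 2 ≤ n ≤ limit, i.e. it counts reduced proper fractions. An alternative all-integer formulation of the same sieve, checked against the known small totients:

```py
limit = 10
phi = list(range(limit + 1))       # phi[n] starts out as n
for p in range(2, limit + 1):
    if phi[p] == p:                # p is prime: phi[p] was never reduced
        for n in range(p, limit + 1, p):
            phi[n] -= phi[n] // p  # exact integer form of phi[n] *= (1 - 1/p)
assert phi[1:] == [1, 1, 2, 2, 4, 2, 6, 4, 6, 4]
assert sum(phi[2:]) == 31          # reduced proper fractions with denominator <= 10
```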
94
1
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class _snake_case : def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=2 , _lowerCamelCase=24 , _lowerCamelCase=16 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=None , _lowerCamelCase=2 , _lowerCamelCase=2 , ): a :Any = parent a :Optional[Any] = batch_size a :Any = patch_size a :Tuple = max_length a :Dict = num_mel_bins a :str = is_training a :Any = use_labels a :str = hidden_size a :int = num_hidden_layers a :List[str] = num_attention_heads a :str = intermediate_size a :Optional[Any] = hidden_act a :Dict = hidden_dropout_prob a :Dict = attention_probs_dropout_prob a :int = type_sequence_label_size a :List[Any] = initializer_range a :str = scope a :List[str] = frequency_stride a :Any = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) a :List[str] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 a :List[Any] = (self.max_length - self.patch_size) // self.time_stride + 1 a :int = frequency_out_dimension * time_out_dimension a :Dict = num_patches + 2 def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) a :Any = None if self.use_labels: a :Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a :Tuple = self.get_config() return config, input_values, labels def SCREAMING_SNAKE_CASE__ ( self ): return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :Dict = ASTModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() a :Tuple = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[Any] = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ) :int = config_and_inputs a :Dict = {'''input_values''': input_values} 
return config, inputs_dict @require_torch class _snake_case ( _snake_case , _snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = ( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[Any] = ASTModelTester(self ) a :Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason='''AST does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE__ ( self ): pass def SCREAMING_SNAKE_CASE__ ( self ): a , a :int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a :Any = model_class(_lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a :Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self ): a , a :Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a :int = model_class(_lowerCamelCase ) a :Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a :int = [*signature.parameters.keys()] a :Dict = ['''input_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self ): for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a :Tuple = ASTModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def __lowerCamelCase ( ): """simple docstring""" a :int = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' ) a , a :int = torchaudio.load(UpperCAmelCase_ ) return audio, sampling_rate @require_torch @require_torchaudio class _snake_case ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ ( self ): return ( ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ) if is_torchaudio_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[int] = self.default_feature_extractor a :str = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(_lowerCamelCase ) a :List[Any] = self.default_feature_extractor a , a :List[Any] = prepare_audio() a :List[str] = audio.squeeze().numpy() a :Optional[Any] = feature_extractor(_lowerCamelCase , sampling_rate=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): a :List[str] = model(**_lowerCamelCase ) # verify the logits a :Optional[int] = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) a :Optional[Any] = torch.tensor([-0.8760, -7.0042, -8.6602] 
).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
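The tester above derives the transformer sequence length from the spectrogram geometry: a grid of patches along the frequency and time axes plus the two prepended tokens. The arithmetic with the tester's default hyperparameters:

```py
patch_size, num_mel_bins, max_length = 2, 16, 24
frequency_stride = time_stride = 2

freq_out = (num_mel_bins - patch_size) // frequency_stride + 1  # 8
time_out = (max_length - patch_size) // time_stride + 1         # 12
seq_length = freq_out * time_out + 2  # + [CLS] and distillation tokens
assert seq_length == 98
```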
94
snake_case : str = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the latest release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' snake_case : List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] snake_case : int = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
94
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool snake_case : Union[str, Any] = { '''Acehnese Arabic''': '''ace_Arab''', '''Acehnese Latin''': '''ace_Latn''', '''Mesopotamian Arabic''': '''acm_Arab''', '''Ta\'izzi-Adeni Arabic''': '''acq_Arab''', '''Tunisian Arabic''': '''aeb_Arab''', '''Afrikaans''': '''afr_Latn''', '''South Levantine Arabic''': '''ajp_Arab''', '''Akan''': '''aka_Latn''', '''Amharic''': '''amh_Ethi''', '''North Levantine Arabic''': '''apc_Arab''', '''Modern Standard Arabic''': '''arb_Arab''', '''Modern Standard Arabic Romanized''': '''arb_Latn''', '''Najdi Arabic''': '''ars_Arab''', '''Moroccan Arabic''': '''ary_Arab''', '''Egyptian Arabic''': '''arz_Arab''', '''Assamese''': '''asm_Beng''', '''Asturian''': '''ast_Latn''', '''Awadhi''': '''awa_Deva''', '''Central Aymara''': '''ayr_Latn''', '''South Azerbaijani''': '''azb_Arab''', '''North Azerbaijani''': '''azj_Latn''', '''Bashkir''': '''bak_Cyrl''', '''Bambara''': '''bam_Latn''', '''Balinese''': '''ban_Latn''', '''Belarusian''': '''bel_Cyrl''', '''Bemba''': '''bem_Latn''', '''Bengali''': '''ben_Beng''', '''Bhojpuri''': '''bho_Deva''', '''Banjar Arabic''': '''bjn_Arab''', '''Banjar Latin''': '''bjn_Latn''', '''Standard Tibetan''': '''bod_Tibt''', '''Bosnian''': '''bos_Latn''', '''Buginese''': '''bug_Latn''', '''Bulgarian''': '''bul_Cyrl''', '''Catalan''': '''cat_Latn''', '''Cebuano''': '''ceb_Latn''', '''Czech''': '''ces_Latn''', '''Chokwe''': '''cjk_Latn''', '''Central Kurdish''': '''ckb_Arab''', '''Crimean Tatar''': '''crh_Latn''', '''Welsh''': '''cym_Latn''', '''Danish''': '''dan_Latn''', '''German''': '''deu_Latn''', '''Southwestern Dinka''': '''dik_Latn''', '''Dyula''': '''dyu_Latn''', '''Dzongkha''': '''dzo_Tibt''', '''Greek''': '''ell_Grek''', '''English''': '''eng_Latn''', '''Esperanto''': '''epo_Latn''', '''Estonian''': '''est_Latn''', '''Basque''': '''eus_Latn''', '''Ewe''': '''ewe_Latn''', '''Faroese''': '''fao_Latn''', '''Fijian''': '''fij_Latn''', '''Finnish''': '''fin_Latn''', '''Fon''': '''fon_Latn''', '''French''': '''fra_Latn''', '''Friulian''': '''fur_Latn''', '''Nigerian Fulfulde''': '''fuv_Latn''', '''Scottish Gaelic''': '''gla_Latn''', '''Irish''': '''gle_Latn''', '''Galician''': '''glg_Latn''', '''Guarani''': '''grn_Latn''', '''Gujarati''': '''guj_Gujr''', '''Haitian Creole''': '''hat_Latn''', '''Hausa''': '''hau_Latn''', '''Hebrew''': '''heb_Hebr''', '''Hindi''': '''hin_Deva''', '''Chhattisgarhi''': '''hne_Deva''', '''Croatian''': '''hrv_Latn''', '''Hungarian''': '''hun_Latn''', '''Armenian''': '''hye_Armn''', '''Igbo''': '''ibo_Latn''', '''Ilocano''': '''ilo_Latn''', '''Indonesian''': '''ind_Latn''', '''Icelandic''': '''isl_Latn''', '''Italian''': '''ita_Latn''', '''Javanese''': '''jav_Latn''', '''Japanese''': '''jpn_Jpan''', '''Kabyle''': '''kab_Latn''', '''Jingpho''': '''kac_Latn''', '''Kamba''': '''kam_Latn''', '''Kannada''': '''kan_Knda''', '''Kashmiri 
Arabic''': '''kas_Arab''', '''Kashmiri Devanagari''': '''kas_Deva''', '''Georgian''': '''kat_Geor''', '''Central Kanuri Arabic''': '''knc_Arab''', '''Central Kanuri Latin''': '''knc_Latn''', '''Kazakh''': '''kaz_Cyrl''', '''Kabiyè''': '''kbp_Latn''', '''Kabuverdianu''': '''kea_Latn''', '''Khmer''': '''khm_Khmr''', '''Kikuyu''': '''kik_Latn''', '''Kinyarwanda''': '''kin_Latn''', '''Kyrgyz''': '''kir_Cyrl''', '''Kimbundu''': '''kmb_Latn''', '''Northern Kurdish''': '''kmr_Latn''', '''Kikongo''': '''kon_Latn''', '''Korean''': '''kor_Hang''', '''Lao''': '''lao_Laoo''', '''Ligurian''': '''lij_Latn''', '''Limburgish''': '''lim_Latn''', '''Lingala''': '''lin_Latn''', '''Lithuanian''': '''lit_Latn''', '''Lombard''': '''lmo_Latn''', '''Latgalian''': '''ltg_Latn''', '''Luxembourgish''': '''ltz_Latn''', '''Luba-Kasai''': '''lua_Latn''', '''Ganda''': '''lug_Latn''', '''Luo''': '''luo_Latn''', '''Mizo''': '''lus_Latn''', '''Standard Latvian''': '''lvs_Latn''', '''Magahi''': '''mag_Deva''', '''Maithili''': '''mai_Deva''', '''Malayalam''': '''mal_Mlym''', '''Marathi''': '''mar_Deva''', '''Minangkabau Arabic ''': '''min_Arab''', '''Minangkabau Latin''': '''min_Latn''', '''Macedonian''': '''mkd_Cyrl''', '''Plateau Malagasy''': '''plt_Latn''', '''Maltese''': '''mlt_Latn''', '''Meitei Bengali''': '''mni_Beng''', '''Halh Mongolian''': '''khk_Cyrl''', '''Mossi''': '''mos_Latn''', '''Maori''': '''mri_Latn''', '''Burmese''': '''mya_Mymr''', '''Dutch''': '''nld_Latn''', '''Norwegian Nynorsk''': '''nno_Latn''', '''Norwegian Bokmål''': '''nob_Latn''', '''Nepali''': '''npi_Deva''', '''Northern Sotho''': '''nso_Latn''', '''Nuer''': '''nus_Latn''', '''Nyanja''': '''nya_Latn''', '''Occitan''': '''oci_Latn''', '''West Central Oromo''': '''gaz_Latn''', '''Odia''': '''ory_Orya''', '''Pangasinan''': '''pag_Latn''', '''Eastern Panjabi''': '''pan_Guru''', '''Papiamento''': '''pap_Latn''', '''Western Persian''': '''pes_Arab''', '''Polish''': '''pol_Latn''', '''Portuguese''': '''por_Latn''', '''Dari''': '''prs_Arab''', '''Southern Pashto''': '''pbt_Arab''', '''Ayacucho Quechua''': '''quy_Latn''', '''Romanian''': '''ron_Latn''', '''Rundi''': '''run_Latn''', '''Russian''': '''rus_Cyrl''', '''Sango''': '''sag_Latn''', '''Sanskrit''': '''san_Deva''', '''Santali''': '''sat_Olck''', '''Sicilian''': '''scn_Latn''', '''Shan''': '''shn_Mymr''', '''Sinhala''': '''sin_Sinh''', '''Slovak''': '''slk_Latn''', '''Slovenian''': '''slv_Latn''', '''Samoan''': '''smo_Latn''', '''Shona''': '''sna_Latn''', '''Sindhi''': '''snd_Arab''', '''Somali''': '''som_Latn''', '''Southern Sotho''': '''sot_Latn''', '''Spanish''': '''spa_Latn''', '''Tosk Albanian''': '''als_Latn''', '''Sardinian''': '''srd_Latn''', '''Serbian''': '''srp_Cyrl''', '''Swati''': '''ssw_Latn''', '''Sundanese''': '''sun_Latn''', '''Swedish''': '''swe_Latn''', '''Swahili''': '''swh_Latn''', '''Silesian''': '''szl_Latn''', '''Tamil''': '''tam_Taml''', '''Tatar''': '''tat_Cyrl''', '''Telugu''': '''tel_Telu''', '''Tajik''': '''tgk_Cyrl''', '''Tagalog''': '''tgl_Latn''', '''Thai''': '''tha_Thai''', '''Tigrinya''': '''tir_Ethi''', '''Tamasheq Latin''': '''taq_Latn''', '''Tamasheq Tifinagh''': '''taq_Tfng''', '''Tok Pisin''': '''tpi_Latn''', '''Tswana''': '''tsn_Latn''', '''Tsonga''': '''tso_Latn''', '''Turkmen''': '''tuk_Latn''', '''Tumbuka''': '''tum_Latn''', '''Turkish''': '''tur_Latn''', '''Twi''': '''twi_Latn''', '''Central Atlas Tamazight''': '''tzm_Tfng''', '''Uyghur''': '''uig_Arab''', '''Ukrainian''': '''ukr_Cyrl''', '''Umbundu''': '''umb_Latn''', '''Urdu''': '''urd_Arab''', 
'''Northern Uzbek''': '''uzn_Latn''', '''Venetian''': '''vec_Latn''', '''Vietnamese''': '''vie_Latn''', '''Waray''': '''war_Latn''', '''Wolof''': '''wol_Latn''', '''Xhosa''': '''xho_Latn''', '''Eastern Yiddish''': '''ydd_Hebr''', '''Yoruba''': '''yor_Latn''', '''Yue Chinese''': '''yue_Hant''', '''Chinese Simplified''': '''zho_Hans''', '''Chinese Traditional''': '''zho_Hant''', '''Standard Malay''': '''zsm_Latn''', '''Zulu''': '''zul_Latn''', } class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'facebook/nllb-200-distilled-600M' SCREAMING_SNAKE_CASE__ = ( 'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ' 'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ' 'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ' 'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.' ) SCREAMING_SNAKE_CASE__ = 'translator' SCREAMING_SNAKE_CASE__ = AutoTokenizer SCREAMING_SNAKE_CASE__ = AutoModelForSeqaSeqLM SCREAMING_SNAKE_CASE__ = LANGUAGE_CODES SCREAMING_SNAKE_CASE__ = ['text', 'text', 'text'] SCREAMING_SNAKE_CASE__ = ['text'] def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if src_lang not in self.lang_to_code: raise ValueError(F'''{src_lang} is not a supported language.''' ) if tgt_lang not in self.lang_to_code: raise ValueError(F'''{tgt_lang} is not a supported language.''' ) a :str = self.lang_to_code[src_lang] a :Optional[Any] = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( _lowerCamelCase , return_tensors='''pt''' , src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): return self.model.generate(**_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=_lowerCamelCase )
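The tool above resolves plain-English language names to NLLB codes, then builds the translation inputs and decodes the generation. A sketch of the same three calls outside the tool wrapper (`_build_translation_inputs` is a private tokenizer helper, used here exactly as the tool does; downloading the checkpoint requires network access):

```py
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

checkpoint = "facebook/nllb-200-distilled-600M"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

# 'Romanian' -> 'ron_Latn' is what the LANGUAGE_CODES table resolves above.
inputs = tokenizer._build_translation_inputs(
    "Hello, how are you?", return_tensors="pt", src_lang="eng_Latn", tgt_lang="ron_Latn"
)
outputs = model.generate(**inputs)
print(tokenizer.decode(outputs[0].tolist(), skip_special_tokens=True))
```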
94
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'ClapFeatureExtractor' SCREAMING_SNAKE_CASE__ = ('RobertaTokenizer', 'RobertaTokenizerFast') def __init__( self , _lowerCamelCase , _lowerCamelCase ): super().__init__(_lowerCamelCase , _lowerCamelCase ) def __call__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ): a :Dict = kwargs.pop('''sampling_rate''' , _lowerCamelCase ) if text is None and audios is None: raise ValueError('''You have to specify either text or audios. Both cannot be none.''' ) if text is not None: a :Optional[int] = self.tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase ) if audios is not None: a :Tuple = self.feature_extractor( _lowerCamelCase , sampling_rate=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase ) if text is not None and audios is not None: a :Union[str, Any] = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_lowerCamelCase ) , tensor_type=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ): return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ): return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase ) @property def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = self.tokenizer.model_input_names a :str = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
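A hedged usage sketch for the combined processor above (the checkpoint name is illustrative; the CLAP feature extractor expects a 1-D waveform at its configured sampling rate, assumed 48 kHz here):

```py
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.random.randn(48_000).astype("float32")  # one second of dummy audio
inputs = processor(
    text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt"
)
# Per the __call__ above: tokenizer outputs plus the extractor's input_features.
print(sorted(inputs.keys()))
```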
94
1
def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ): """simple docstring""" while b: a , a :Optional[Any] = b, a % b return a def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ): """simple docstring""" return a if b == 0 else euclidean_gcd_recursive(UpperCAmelCase_ , a % b ) def __lowerCamelCase ( ): """simple docstring""" print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' ) print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' ) print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' ) print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' ) print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' ) print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' ) print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' ) print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' ) print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' ) print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' ) if __name__ == "__main__": main()
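The iterative/recursive pair above returns only the gcd itself; the same recursion also yields Bézout coefficients when the results are threaded back up. A minimal extended-Euclid sketch:

```py
def extended_gcd(a: int, b: int) -> tuple[int, int, int]:
    # Returns (g, x, y) with a*x + b*y == g == gcd(a, b).
    if b == 0:
        return a, 1, 0
    g, x, y = extended_gcd(b, a % b)
    return g, y, x - (a // b) * y


g, x, y = extended_gcd(6, 3)
assert g == 3 and 6 * x + 3 * y == g
```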
94
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]=True ): """simple docstring""" model.train() a :str = model(UpperCAmelCase_ ) a :List[str] = F.mse_loss(UpperCAmelCase_ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(UpperCAmelCase_ ) def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int=False ): """simple docstring""" set_seed(42 ) a :List[Any] = RegressionModel() a :Any = deepcopy(UpperCAmelCase_ ) a :Tuple = RegressionDataset(length=80 ) a :Tuple = DataLoader(UpperCAmelCase_ , batch_size=16 ) model.to(accelerator.device ) if sched: a :str = AdamW(params=model.parameters() , lr=1E-3 ) a :str = AdamW(params=ddp_model.parameters() , lr=1E-3 ) a :List[str] = LambdaLR(UpperCAmelCase_ , lr_lambda=lambda UpperCAmelCase_ : epoch**0.65 ) a :List[str] = LambdaLR(UpperCAmelCase_ , lr_lambda=lambda UpperCAmelCase_ : epoch**0.65 ) # Make a copy of `model` if sched: a , a , a , a :List[Any] = accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: a , a :str = accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" a , a , a :str = get_training_setup(UpperCAmelCase_ ) # Use a single batch a , a :Dict = next(iter(UpperCAmelCase_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model a , a :int = accelerator.gather((ddp_input, ddp_target) ) a , a :Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCAmelCase_ ): step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: # Sync grads step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads 
should always be in sync check_model_parameters(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) a :Union[str, Any] = ddp_input[torch.randperm(len(UpperCAmelCase_ ) )] def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" a , a , a :List[str] = get_training_setup(UpperCAmelCase_ ) # Use a single batch a , a :List[str] = next(iter(UpperCAmelCase_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model a , a :List[Any] = accelerator.gather((ddp_input, ddp_target) ) a , a :Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCAmelCase_ ): step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: # Sync grads step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) a :Any = ddp_input[torch.randperm(len(UpperCAmelCase_ ) )] def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : int=False ): """simple docstring""" a :Optional[int] = Accelerator( split_batches=UpperCAmelCase_ , dispatch_batches=UpperCAmelCase_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly a , a , a :List[str] = get_training_setup(UpperCAmelCase_ ) for iteration, batch in enumerate(UpperCAmelCase_ ): a , a :List[Any] = batch.values() # Gather the distributed inputs and targs for the base model a , a :List[str] = accelerator.gather((ddp_input, ddp_target) ) a , a :List[str] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(UpperCAmelCase_ ): step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCAmelCase_ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'''Gradients not in sync 
when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) a :List[str] = ddp_input[torch.randperm(len(UpperCAmelCase_ ) )] GradientState._reset_state() def __lowerCamelCase ( UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[int]=False ): """simple docstring""" a :Optional[Any] = Accelerator( split_batches=UpperCAmelCase_ , dispatch_batches=UpperCAmelCase_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly a , a , a , a , a , a , a :Optional[Any] = get_training_setup(UpperCAmelCase_ , UpperCAmelCase_ ) for iteration, batch in enumerate(UpperCAmelCase_ ): a , a :int = batch.values() # Gather the distributed inputs and targs for the base model a , a :List[str] = accelerator.gather((ddp_input, ddp_target) ) a , a :str = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCAmelCase_ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(UpperCAmelCase_ ): step_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n''' a :Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCAmelCase_ )) if accelerator.num_processes > 1: check_model_parameters(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def __lowerCamelCase ( ): """simple docstring""" a :Optional[Any] = Accelerator() a :int = RegressionDataset(length=80 ) a :List[str] = DataLoader(UpperCAmelCase_ , batch_size=16 ) a :List[Any] = RegressionDataset(length=96 ) a :Any = DataLoader(UpperCAmelCase_ , batch_size=16 ) a , a :Optional[int] = accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(UpperCAmelCase_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCAmelCase_ ) if iteration < len(UpperCAmelCase_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(UpperCAmelCase_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCAmelCase_ ) if batch_num < len(UpperCAmelCase_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def __lowerCamelCase ( ): """simple docstring""" a :Optional[int] = Accelerator() a :Optional[int] = accelerator.state if 
state.local_process_index == 0: print('''**Test `accumulate` gradient accumulation with dataloader break**''' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('''**Test NOOP `no_sync` context manager**''' ) test_noop_sync(UpperCAmelCase_ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('''**Test Distributed `no_sync` context manager**''' ) test_distributed_sync(UpperCAmelCase_ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation, ''' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(UpperCAmelCase_ , UpperCAmelCase_ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(UpperCAmelCase_ , UpperCAmelCase_ ) def __lowerCamelCase ( UpperCAmelCase_ : Tuple ): """simple docstring""" main() if __name__ == "__main__": main()
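Taken together, the tests above pin down the contract of gradient accumulation in Accelerate: inside accelerator.accumulate(model) (or an explicit no_sync), gradients are only synchronized, and the optimizer should only step, once every gradient_accumulation_steps batches. A minimal sketch of the training pattern being validated; the model, optimizer, and dataloader names are illustrative placeholders, not part of the test file:

import torch.nn.functional as F
from accelerate import Accelerator

def train(model, optimizer, dataloader):
    # Sketch of the pattern exercised by the tests above (assumes a
    # regression-style model whose batches are (input, target) pairs).
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for batch, target in dataloader:
        # Gradient sync is skipped on all but every 2nd batch here.
        with accelerator.accumulate(model):
            output = model(batch)
            loss = F.mse_loss(output, target)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()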
94
1
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish DNI: 8 digits plus a mod-23 check letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
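As a worked check of the mod-23 rule: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == 'Z', so the canonical test id ends in Z:

assert is_spain_national_id("12345678Z")
assert is_spain_national_id("12345678-Z")   # dashes are stripped before checking
assert not is_spain_national_id("12345678T")  # wrong check letter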
94
def calc_profit(profit: list, weight: list, max_weight: int) -> int:
    """Greedy (fractional-knapsack style) profit for a given weight limit."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # Profit gained per 1 kg of each item: the profit/weight ratios.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # A sorted copy of the ratios, ascending.
    sorted_profit_by_weight = sorted(profit_by_weight)

    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # Loop until the total weight reaches max_weight or items run out.
    while limit <= max_weight and i < length:
        # Greatest remaining profit/weight ratio.
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark as used
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Take the whole item: weight[index] / weight[index] == 1.
            gain += 1 * profit[index]
        else:
            # Item is heavier than the remaining capacity: take the
            # fraction that fits and stop.
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) "
        "separated by spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function call
    print(calc_profit(profit, weight, max_weight))
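A short worked example of the greedy rule: with profits [1, 2, 3] and weights [3, 4, 5] the profit/weight ratios are [0.33, 0.5, 0.6], so items are taken third, second, first until capacity runs out:

assert calc_profit([1, 2, 3], [3, 4, 5], 15) == 6  # all items fit (total weight 12)
assert calc_profit([1, 2, 3], [3, 4, 5], 5) == 3   # only the 0.6-ratio item fits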
94
1
from manim import * class _snake_case ( _snake_case ): def SCREAMING_SNAKE_CASE__ ( self ): a :Dict = Rectangle(height=0.5 , width=0.5 ) a :Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) a :List[Any] = [mem.copy() for i in range(6 )] a :int = [mem.copy() for i in range(6 )] a :Optional[Any] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 ) a :List[str] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 ) a :Union[str, Any] = VGroup(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0 ) a :Any = Text('''CPU''' , font_size=24 ) a :Dict = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_lowerCamelCase ) a :Optional[Any] = [mem.copy() for i in range(4 )] a :Dict = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 ) a :Union[str, Any] = Text('''GPU''' , font_size=24 ) a :List[str] = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase ) gpu.move_to([-1, -1, 0] ) self.add(_lowerCamelCase ) a :List[str] = [mem.copy() for i in range(6 )] a :Union[str, Any] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 ) a :Optional[int] = Text('''Model''' , font_size=24 ) a :int = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase ) model.move_to([3, -1.0, 0] ) self.add(_lowerCamelCase ) a :Optional[Any] = [] for i, rect in enumerate(_lowerCamelCase ): rect.set_stroke(_lowerCamelCase ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) a :int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowerCamelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowerCamelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=_lowerCamelCase , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=_lowerCamelCase , buff=0.0 ) self.add(_lowerCamelCase ) cpu_targs.append(_lowerCamelCase ) a :Optional[Any] = [mem.copy() for i in range(6 )] a :Tuple = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 ) a :Any = Text('''Loaded Checkpoint''' , font_size=24 ) a :Dict = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , aligned_edge=_lowerCamelCase , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) a :Optional[int] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) a :Optional[Any] = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_lowerCamelCase , _lowerCamelCase ) a :Union[str, Any] = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) a :Dict = MarkupText( F'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_lowerCamelCase ) , Write(_lowerCamelCase ) ) self.play(Write(_lowerCamelCase , run_time=1 ) , Create(_lowerCamelCase , run_time=1 ) ) a :Optional[Any] = [] a :List[Any] = [] for i, rect in enumerate(_lowerCamelCase ): a :Tuple = fill.copy().set_fill(_lowerCamelCase , opacity=0.7 ) target.move_to(_lowerCamelCase ) 
first_animations.append(GrowFromCenter(_lowerCamelCase , run_time=1 ) ) a :Optional[Any] = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(_lowerCamelCase , run_time=1.5 ) ) self.play(*_lowerCamelCase ) self.play(*_lowerCamelCase ) self.wait()
94
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging snake_case : Dict = logging.get_logger(__name__) snake_case : Tuple = '''▁''' snake_case : Any = {'''vocab_file''': '''sentencepiece.bpe.model'''} snake_case : Tuple = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } snake_case : int = { '''xlm-roberta-base''': 5_12, '''xlm-roberta-large''': 5_12, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_12, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_12, '''xlm-roberta-large-finetuned-conll03-english''': 5_12, '''xlm-roberta-large-finetuned-conll03-german''': 5_12, } class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ['input_ids', 'attention_mask'] def __init__( self , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase = None , **_lowerCamelCase , ): # Mask token behave like a normal word, i.e. include the space before it a :Optional[int] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token a :int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) a :Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCamelCase ) ) a :str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token a :Tuple = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab a :List[str] = 1 a :Dict = len(self.sp_model ) + self.fairseq_offset a :List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ): a :List[str] = self.__dict__.copy() a :Optional[int] = None a :int = self.sp_model.serialized_model_proto() return state def __setstate__( self , _lowerCamelCase ): a :Union[str, Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): a :Union[str, Any] = {} a :Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a :List[Any] = [self.cls_token_id] a :Dict = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ): a :int = [self.sep_token_id] a :int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def SCREAMING_SNAKE_CASE__ ( self ): return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def SCREAMING_SNAKE_CASE__ ( self ): a :Any = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] a :Optional[Any] = self.sp_model.PieceToId(_lowerCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :Tuple = ''''''.join(_lowerCamelCase ).replace(_lowerCamelCase , ''' ''' ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ): if not os.path.isdir(_lowerCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return a :int = os.path.join( _lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCamelCase , 
'''wb''' ) as fi: a :List[Any] = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (out_vocab_file,)
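The fairseq_offset bookkeeping above shifts every SentencePiece id by 1 so the first four slots match fairseq's <s>/<pad>/</s>/<unk> layout. A minimal stand-alone illustration of the token-to-id direction (spm_piece_to_id stands in for self.sp_model.PieceToId):

FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1

def token_to_id(token: str, spm_piece_to_id) -> int:
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = spm_piece_to_id(token)
    # SentencePiece returns 0 for unknown pieces, which must map to <unk>.
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_TOKENS_TO_IDS["<unk>"]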
94
1
def heaps(arr: list) -> list:
    """Return all permutations of arr via Heap's iterative algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list) -> None:
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
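Each successive permutation differs from the previous one by a single swap, and n! tuples are produced in total; for example:

assert heaps([1, 2, 3]) == [
    (1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1),
]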
94
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
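As a sanity check, F(12) = 144 is the first Fibonacci term with three digits, and the published Project Euler 25 answer for n = 1000 is 4782:

assert solution(3) == 12
assert solution(1000) == 4782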
94
1
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the largest product of 13 consecutive digits in n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
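The scan checks every window of 13 consecutive digits (len(n) - 12 starting positions in the 1000-digit string) and yields the published answer 23514624000. An equivalent, slightly tidier formulation, shown only as an illustrative alternative, uses math.prod:

from math import prod

def largest_window_product(digits: str = N, width: int = 13) -> int:
    # Same brute-force scan as solution(), written as one expression.
    return max(
        prod(int(d) for d in digits[i : i + width])
        for i in range(len(digits) - width + 1)
    )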
94
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class _snake_case : def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , _lowerCamelCase=1000 , ): a :str = parent a :str = batch_size a :List[Any] = seq_length a :Union[str, Any] = is_training a :str = use_input_mask a :Tuple = use_token_type_ids a :Optional[int] = use_labels a :Union[str, Any] = vocab_size a :Optional[Any] = hidden_size a :Any = num_hidden_layers a :Optional[int] = num_attention_heads a :Tuple = intermediate_size a :Dict = hidden_act a :str = hidden_dropout_prob a :List[Any] = attention_probs_dropout_prob a :List[Any] = max_position_embeddings a :List[str] = type_vocab_size a :List[Any] = type_sequence_label_size a :Union[str, Any] = initializer_range a :Optional[Any] = num_labels a :Optional[int] = num_choices a :Union[str, Any] = scope a :List[str] = range_bbox def SCREAMING_SNAKE_CASE__ ( self ): a :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment a :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a :List[Any] = bbox[i, j, 3] a :List[str] = bbox[i, j, 1] a :List[str] = t if bbox[i, j, 2] < bbox[i, j, 0]: a :Dict = bbox[i, j, 2] a :Dict = bbox[i, j, 0] a :Any = t a :Optional[Any] = tf.convert_to_tensor(_lowerCamelCase ) a :int = None if self.use_input_mask: a :List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) a :Optional[int] = None if self.use_token_type_ids: a :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a :List[Any] = None a :List[Any] = None a :List[Any] = None if self.use_labels: a :Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a :Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a :List[str] = ids_tensor([self.batch_size] , self.num_choices ) a :List[Any] = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , 
initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :Optional[int] = TFLayoutLMModel(config=_lowerCamelCase ) a :Dict = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) a :Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase , token_type_ids=_lowerCamelCase ) a :Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :List[str] = TFLayoutLMForMaskedLM(config=_lowerCamelCase ) a :int = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :Optional[int] = self.num_labels a :List[Any] = TFLayoutLMForSequenceClassification(config=_lowerCamelCase ) a :int = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :int = self.num_labels a :Optional[int] = TFLayoutLMForTokenClassification(config=_lowerCamelCase ) a :int = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :Optional[Any] = TFLayoutLMForQuestionAnswering(config=_lowerCamelCase ) a :Optional[int] = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[str] = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) :List[Any] = config_and_inputs a :Union[str, Any] = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class _snake_case ( _snake_case , _snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if 
is_tf_available() else () ) SCREAMING_SNAKE_CASE__ = ( { 'feature-extraction': TFLayoutLMModel, 'fill-mask': TFLayoutLMForMaskedLM, 'text-classification': TFLayoutLMForSequenceClassification, 'token-classification': TFLayoutLMForTokenClassification, 'zero-shot': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = 10 def SCREAMING_SNAKE_CASE__ ( self ): a :Dict = TFLayoutLMModelTester(self ) a :Dict = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self ): a :str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self ): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a :str = TFLayoutLMModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) @unittest.skip('''Onnx compliancy broke with TF 2.10''' ) def SCREAMING_SNAKE_CASE__ ( self ): pass def __lowerCamelCase ( ): """simple docstring""" a :Tuple = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231 a :Any = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 a :List[str] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 a :List[str] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. 
at the token level) a :Any = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class _snake_case ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' ) a , a , a , a , a :Optional[Any] = prepare_layoutlm_batch_inputs() # forward pass a :Tuple = model(input_ids=_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) # test the sequence output on [0, :3, :3] a :List[str] = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=1e-3 ) ) # test the pooled output on [1, :3] a :List[str] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowerCamelCase , atol=1e-3 ) ) @slow def SCREAMING_SNAKE_CASE__ ( self ): # initialize model with randomly initialized sequence classification head a :str = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2 ) a , a , a , a , a :List[str] = prepare_layoutlm_batch_inputs() # forward pass a :List[Any] = model( input_ids=_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar a :Union[str, Any] = outputs.loss a :Optional[Any] = (2,) self.assertEqual(loss.shape , _lowerCamelCase ) # test the shape of the logits a :Any = outputs.logits a :Tuple = (2, 2) self.assertEqual(logits.shape , _lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self ): # initialize model with randomly initialized token classification head a :Dict = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=13 ) a , a , a , a , a :Dict = prepare_layoutlm_batch_inputs() # forward pass a :List[Any] = model( input_ids=_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase ) # test the shape of the logits a :Optional[Any] = outputs.logits a :List[Any] = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , _lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self ): # initialize model with randomly initialized token classification head a :List[Any] = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' ) a , a , a , a , a :Any = prepare_layoutlm_batch_inputs() # forward pass a :str = model(input_ids=_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) # test the shape of the logits a :Optional[int] = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , _lowerCamelCase ) self.assertEqual(outputs.end_logits.shape , _lowerCamelCase )
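The nested bbox loops in the tester above enforce LayoutLM's box convention, [x0, y0, x1, y1] with x0 <= x1 and y0 <= y1, by swapping out-of-order coordinate pairs. A vectorized NumPy equivalent of that clean-up (illustrative only, not a helper from the test file):

import numpy as np

def legalize_bboxes(bbox: np.ndarray) -> np.ndarray:
    # bbox has shape (batch, seq_len, 4) with rows [x0, y0, x1, y1];
    # sorting each coordinate pair guarantees x0 <= x1 and y0 <= y1.
    out = bbox.copy()
    out[..., [0, 2]] = np.sort(bbox[..., [0, 2]], axis=-1)
    out[..., [1, 3]] = np.sort(bbox[..., [1, 3]], axis=-1)
    return out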
94
1
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class _snake_case ( _snake_case , _snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = AutoencoderKL SCREAMING_SNAKE_CASE__ = 'sample' SCREAMING_SNAKE_CASE__ = 1e-2 @property def SCREAMING_SNAKE_CASE__ ( self ): a :str = 4 a :int = 3 a :int = (32, 32) a :List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCamelCase ) return {"sample": image} @property def SCREAMING_SNAKE_CASE__ ( self ): return (3, 32, 32) @property def SCREAMING_SNAKE_CASE__ ( self ): return (3, 32, 32) def SCREAMING_SNAKE_CASE__ ( self ): a :Tuple = { '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } a :Any = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ ( self ): pass def SCREAMING_SNAKE_CASE__ ( self ): pass @unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' ) def SCREAMING_SNAKE_CASE__ ( self ): # enable deterministic behavior for gradient checkpointing a , a :Any = self.prepare_init_args_and_inputs_for_common() a :Dict = self.model_class(**_lowerCamelCase ) model.to(_lowerCamelCase ) assert not model.is_gradient_checkpointing and model.training a :List[str] = model(**_lowerCamelCase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() a :Tuple = torch.randn_like(_lowerCamelCase ) a :List[str] = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing a :Optional[Any] = self.model_class(**_lowerCamelCase ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(_lowerCamelCase ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training a :Optional[int] = model_a(**_lowerCamelCase ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() a :List[Any] = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) a :str = dict(model.named_parameters() ) a :Any = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) ) def SCREAMING_SNAKE_CASE__ ( self ): a , a :Optional[Any] = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_lowerCamelCase ) a :str = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE__ ( self ): a :int = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' ) a :Optional[int] = model.to(_lowerCamelCase ) model.eval() if torch_device == "mps": a :List[str] = torch.manual_seed(0 ) else: a :List[Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(0 ) a :Any = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) a :List[Any] = image.to(_lowerCamelCase ) with torch.no_grad(): a :Tuple = model(_lowerCamelCase , sample_posterior=_lowerCamelCase , generator=_lowerCamelCase ).sample a :Any = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": a :Optional[Any] = torch.tensor( [ -4.00_78e-01, -3.83_23e-04, -1.26_81e-01, -1.14_62e-01, 2.00_95e-01, 1.08_93e-01, -8.82_47e-02, -3.03_61e-01, -9.86_44e-03, ] ) elif torch_device == "cpu": a :Tuple = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: a :Optional[int] = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(_lowerCamelCase , _lowerCamelCase , rtol=1e-2 ) ) @slow class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ): return F'''gaussian_noise_s={seed}_shape={'_'.join([str(_lowerCamelCase ) for s in shape] )}.npy''' def SCREAMING_SNAKE_CASE__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=0 , _lowerCamelCase=(4, 3, 512, 512) , _lowerCamelCase=False ): a :Union[str, Any] = torch.floataa if fpaa else torch.floataa a :Any = torch.from_numpy(load_hf_numpy(self.get_file_format(_lowerCamelCase , _lowerCamelCase ) ) ).to(_lowerCamelCase ).to(_lowerCamelCase ) return image def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase="CompVis/stable-diffusion-v1-4" , _lowerCamelCase=False ): a :Optional[Any] = '''fp16''' if fpaa else None a :Optional[int] = torch.floataa if fpaa else torch.floataa a :Any = AutoencoderKL.from_pretrained( _lowerCamelCase , subfolder='''vae''' , torch_dtype=_lowerCamelCase , revision=_lowerCamelCase , ) model.to(_lowerCamelCase ).eval() return model def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=0 ): if torch_device == "mps": return torch.manual_seed(_lowerCamelCase ) return torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) @parameterized.expand( [ # fmt: off 
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :Optional[int] = self.get_sd_vae_model() a :str = self.get_sd_image(_lowerCamelCase ) a :Dict = self.get_generator(_lowerCamelCase ) with torch.no_grad(): a :int = model(_lowerCamelCase , generator=_lowerCamelCase , sample_posterior=_lowerCamelCase ).sample assert sample.shape == image.shape a :List[str] = sample[-1, -2:, -2:, :2].flatten().float().cpu() a :Optional[int] = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ): a :List[Any] = self.get_sd_vae_model(fpaa=_lowerCamelCase ) a :str = self.get_sd_image(_lowerCamelCase , fpaa=_lowerCamelCase ) a :Any = self.get_generator(_lowerCamelCase ) with torch.no_grad(): a :int = model(_lowerCamelCase , generator=_lowerCamelCase , sample_posterior=_lowerCamelCase ).sample assert sample.shape == image.shape a :Optional[int] = sample[-1, -2:, :2, -2:].flatten().float().cpu() a :int = torch.tensor(_lowerCamelCase ) assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :Optional[int] = self.get_sd_vae_model() a :Optional[Any] = self.get_sd_image(_lowerCamelCase ) with torch.no_grad(): a :str = model(_lowerCamelCase ).sample assert sample.shape == image.shape a :Union[str, Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() a :Optional[int] = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ): a :int = self.get_sd_vae_model() a :str = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): a :Union[str, Any] = model.decode(_lowerCamelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] a :str = sample[-1, -2:, :2, -2:].flatten().cpu() a :List[Any] = torch.tensor(_lowerCamelCase ) assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def 
SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ): a :int = self.get_sd_vae_model(fpaa=_lowerCamelCase ) a :List[Any] = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCamelCase ) with torch.no_grad(): a :List[Any] = model.decode(_lowerCamelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] a :Any = sample[-1, -2:, :2, -2:].flatten().float().cpu() a :Any = torch.tensor(_lowerCamelCase ) assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=5e-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :Dict = self.get_sd_vae_model(fpaa=_lowerCamelCase ) a :Dict = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCamelCase ) with torch.no_grad(): a :Union[str, Any] = model.decode(_lowerCamelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): a :Optional[int] = model.decode(_lowerCamelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :Union[str, Any] = self.get_sd_vae_model() a :Union[str, Any] = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): a :Dict = model.decode(_lowerCamelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): a :Tuple = model.decode(_lowerCamelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ): a :Union[str, Any] = self.get_sd_vae_model() a :Optional[int] = self.get_sd_image(_lowerCamelCase ) a :Dict = self.get_generator(_lowerCamelCase ) with torch.no_grad(): a :Optional[Any] = model.encode(_lowerCamelCase ).latent_dist a :int = dist.sample(generator=_lowerCamelCase ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] a :Any = sample[0, -1, -3:, -3:].flatten().cpu() a :List[str] = torch.tensor(_lowerCamelCase ) a :int = 3e-3 if torch_device != '''mps''' else 1e-2 assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=_lowerCamelCase )
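The slices asserted above come from the standard AutoencoderKL round trip; a minimal sketch using the same checkpoint and shapes as the tests (8x spatial downsampling, 4 latent channels for the SD VAE):

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained(
    "CompVis/stable-diffusion-v1-4", subfolder="vae"
).eval()
image = torch.randn(1, 3, 512, 512)
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample()  # (1, 4, 64, 64)
    reconstruction = vae.decode(latents).sample       # (1, 3, 512, 512)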
94
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
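A step-by-step trace of the iterative version on (48, 18): (48, 18) -> (18, 12) -> (12, 6) -> (6, 0), so the gcd is 6:

assert euclidean_gcd(48, 18) == 6
assert euclidean_gcd_recursive(48, 18) == 6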
94
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL snake_case : Dict = logging.get_logger(__name__) class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = ['pixel_values'] def __init__( self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PIL.Image.BICUBIC , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ): super().__init__(**_lowerCamelCase ) a :Optional[Any] = size if size is not None else {'''height''': 256, '''width''': 256} a :Optional[Any] = get_size_dict(_lowerCamelCase ) a :Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} a :Union[str, Any] = get_size_dict(_lowerCamelCase , param_name='''crop_size''' ) a :List[Any] = do_resize a :Union[str, Any] = size a :Optional[int] = resample a :Dict = do_center_crop a :List[Any] = crop_size a :Optional[int] = do_rescale a :List[Any] = rescale_factor a :List[Any] = do_normalize a :int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN a :Any = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PIL.Image.BICUBIC , _lowerCamelCase = None , **_lowerCamelCase , ): a :Optional[Any] = get_size_dict(_lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' ) return resize( _lowerCamelCase , size=(size['''height'''], size['''width''']) , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ): a :Optional[Any] = get_size_dict(_lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. 
Got {size.keys()}''' ) return center_crop(_lowerCamelCase , size=(size['''height'''], size['''width''']) , data_format=_lowerCamelCase , **_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ): return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ): return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase=None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ): a :List[str] = do_resize if do_resize is not None else self.do_resize a :Any = resample if resample is not None else self.resample a :List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop a :int = do_rescale if do_rescale is not None else self.do_rescale a :Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor a :Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize a :Optional[int] = image_mean if image_mean is not None else self.image_mean a :Union[str, Any] = image_std if image_std is not None else self.image_std a :str = size if size is not None else self.size a :str = get_size_dict(_lowerCamelCase ) a :Optional[Any] = crop_size if crop_size is not None else self.crop_size a :Union[str, Any] = get_size_dict(_lowerCamelCase , param_name='''crop_size''' ) a :Union[str, Any] = make_list_of_images(_lowerCamelCase ) if not valid_images(_lowerCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. a :str = [to_numpy_array(_lowerCamelCase ) for image in images] if do_resize: a :Optional[int] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images] if do_center_crop: a :int = [self.center_crop(image=_lowerCamelCase , size=_lowerCamelCase ) for image in images] if do_rescale: a :Any = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images] if do_normalize: a :Dict = [self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase ) for image in images] a :Any = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images] a :str = {'''pixel_values''': images} return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
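Numerically, the default rescale (factor 1/255) followed by the ImageNet-standard normalization (mean and std of 0.5 per channel) maps pixel values from [0, 255] into [-1, 1]; for a single pixel:

import numpy as np

pixel = np.array([255.0, 128.0, 0.0])
rescaled = pixel * (1 / 255)         # -> [1.0, ~0.502, 0.0]
normalized = (rescaled - 0.5) / 0.5  # -> [1.0, ~0.004, -1.0]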
94
from __future__ import annotations def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : list[str] | None = None , UpperCAmelCase_ : dict[str, float] | None = None , UpperCAmelCase_ : bool = False , ): """simple docstring""" a :str = cipher_alphabet or [chr(UpperCAmelCase_ ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) a :List[Any] = { '''a''': 0.08497, '''b''': 0.01492, '''c''': 0.02202, '''d''': 0.04253, '''e''': 0.11162, '''f''': 0.02228, '''g''': 0.02015, '''h''': 0.06094, '''i''': 0.07546, '''j''': 0.00153, '''k''': 0.01292, '''l''': 0.04025, '''m''': 0.02406, '''n''': 0.06749, '''o''': 0.07507, '''p''': 0.01929, '''q''': 0.00095, '''r''': 0.07587, '''s''': 0.06327, '''t''': 0.09356, '''u''': 0.02758, '''v''': 0.00978, '''w''': 0.02560, '''x''': 0.00150, '''y''': 0.01994, '''z''': 0.00077, } else: # Custom frequencies dictionary a :Dict = frequencies_dict if not case_sensitive: a :Union[str, Any] = ciphertext.lower() # Chi squared statistic values a :dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(UpperCAmelCase_ ) ): a :int = '''''' # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet a :Dict = (alphabet_letters.index(letter.lower() ) - shift) % len( UpperCAmelCase_ ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter a :List[Any] = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: a :Optional[int] = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message a :List[Any] = decrypted_with_shift.lower().count(UpperCAmelCase_ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies a :Dict = frequencies[letter] * occurrences # Complete the chi squared statistic formula a :Any = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message a :int = decrypted_with_shift.count(UpperCAmelCase_ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies a :Tuple = frequencies[letter] * occurrences # Complete the chi squared statistic formula a :Optional[Any] = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary a :Optional[Any] = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(UpperCAmelCase_ : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] a :int = min( UpperCAmelCase_ , key=UpperCAmelCase_ , ) # Get all the data from the most likely cipher (key, decoded message) ( ( a ) , ( a ) , ) :Optional[int] = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
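Condensed, the score accumulated for each candidate shift is sum((occurrences - expected)^2 / expected) over the letters of the decrypted text, where the code above takes expected = frequency[letter] * occurrences; the shift with the smallest total wins. A compact restatement of that inner loop (illustrative only, mirroring the source's per-character accumulation):

def shift_score(decrypted: str, frequencies: dict[str, float]) -> float:
    score = 0.0
    for letter in decrypted.lower():
        if letter in frequencies:
            occurrences = decrypted.lower().count(letter)
            expected = frequencies[letter] * occurrences
            score += (occurrences - expected) ** 2 / expected
    return score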
style_context_codestyle: 94
label: 1
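The style_context in the row above cracks a Caesar cipher by scoring every candidate shift with a chi-squared statistic and keeping the smallest. Below is a minimal sketch of that scoring step with descriptive names in place of the obfuscated ones; the frequency table is truncated from the one above, and the whole snippet is illustrative rather than the exact code:

# Chi-squared score of one candidate decryption, mirroring the loop above:
# every character occurrence contributes (observed - expected)**2 / expected,
# with expected = frequency * occurrences as in the code above.
FREQ = {"a": 0.08497, "e": 0.11162, "t": 0.09356, "o": 0.07507, "n": 0.06749}

def shift_score(decrypted: str, frequencies: dict) -> float:
    total = 0.0
    for letter in decrypted.lower():
        if letter in frequencies:
            occurrences = decrypted.lower().count(letter)
            expected = frequencies[letter] * occurrences
            total += (occurrences - expected) ** 2 / expected
    return total

# The cracker evaluates this for all shifts of the alphabet and returns the minimum.
print(round(shift_score("attack at dawn", FREQ), 2))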
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = ['image_processor', 'tokenizer'] SCREAMING_SNAKE_CASE__ = 'Pix2StructImageProcessor' SCREAMING_SNAKE_CASE__ = ('T5Tokenizer', 'T5TokenizerFast') def __init__( self , _lowerCamelCase , _lowerCamelCase ): a :Optional[int] = False super().__init__(_lowerCamelCase , _lowerCamelCase ) def __call__( self , _lowerCamelCase=None , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = 2048 , _lowerCamelCase = 0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = True , _lowerCamelCase = None , **_lowerCamelCase , ): if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None and not self.image_processor.is_vqa: a :int = self.tokenizer a :int = self.tokenizer( text=_lowerCamelCase , add_special_tokens=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , stride=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_overflowing_tokens=_lowerCamelCase , return_special_tokens_mask=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_length=_lowerCamelCase , verbose=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase , ) return text_encoding if not self.image_processor.is_vqa: # add pixel_values a :Optional[Any] = self.image_processor( _lowerCamelCase , return_tensors=_lowerCamelCase , max_patches=_lowerCamelCase , **_lowerCamelCase ) else: # add pixel_values and bbox a :Optional[int] = self.image_processor( _lowerCamelCase , return_tensors=_lowerCamelCase , max_patches=_lowerCamelCase , header_text=_lowerCamelCase , **_lowerCamelCase ) if text is not None and not self.image_processor.is_vqa: a :Any = self.tokenizer( text=_lowerCamelCase , add_special_tokens=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , stride=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_overflowing_tokens=_lowerCamelCase , return_special_tokens_mask=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_length=_lowerCamelCase , verbose=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase , ) if "attention_mask" in text_encoding: a :List[str] = text_encoding.pop('''attention_mask''' ) if "input_ids" in text_encoding: a :List[Any] = text_encoding.pop('''input_ids''' ) else: a :str = None if text_encoding is not None: encoding_image_processor.update(_lowerCamelCase ) return encoding_image_processor def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ): return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ): return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase ) @property def SCREAMING_SNAKE_CASE__ ( self ): a :Union[str, Any] = self.tokenizer.model_input_names a :Dict = 
self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
code_codestyle: 94
from maths.prime_factors import prime_factors def __lowerCamelCase ( UpperCAmelCase_ : int ): """simple docstring""" if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): a :Dict = F'''Input value of [number={number}] must be an integer''' raise TypeError(UpperCAmelCase_ ) if number < 1: raise ValueError('''Input must be a positive integer''' ) return -1 if len(prime_factors(UpperCAmelCase_ ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 94
label: 1
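The style_context in the row above is the Liouville lambda function, lambda(n) = (-1)**Omega(n), where Omega counts prime factors with multiplicity. A self-contained sketch follows; the prime_factors helper is reimplemented here by trial division, since the original imports it from maths.prime_factors:

def prime_factors(n: int) -> list[int]:
    # Trial division: collect prime factors with multiplicity.
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors

def liouville_lambda(n: int) -> int:
    # -1 for an odd number of prime factors, +1 for an even number.
    return -1 if len(prime_factors(n)) % 2 else 1

print(liouville_lambda(10))  # 10 = 2 * 5, two factors -> 1
print(liouville_lambda(12))  # 12 = 2 * 2 * 3, three factors -> -1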
from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run UpperCAmelCase__ = True except (ImportError, AttributeError): UpperCAmelCase__ = object def _a ( *a :str , **a :Optional[Any] ) -> int: pass UpperCAmelCase__ = False UpperCAmelCase__ = logging.get_logger("transformers-cli/serving") def _a ( a :Namespace ) -> Any: a = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) return ServeCommand(a , args.host , args.port , args.workers ) class lowercase_ ( lowercase ): '''simple docstring''' __snake_case = 42 class lowercase_ ( lowercase ): '''simple docstring''' __snake_case = 42 __snake_case = 42 class lowercase_ ( lowercase ): '''simple docstring''' __snake_case = 42 class lowercase_ ( lowercase ): '''simple docstring''' __snake_case = 42 class lowercase_ ( lowercase ): '''simple docstring''' @staticmethod def __lowerCAmelCase ( __UpperCAmelCase : ArgumentParser ) ->Dict: """simple docstring""" a = parser.add_parser( '''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' ) serve_parser.add_argument( '''--task''' , type=__UpperCAmelCase , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , ) serve_parser.add_argument('''--host''' , type=__UpperCAmelCase , default='''localhost''' , help='''Interface the server will listen on.''' ) serve_parser.add_argument('''--port''' , type=__UpperCAmelCase , default=8_888 , help='''Port the serving will listen to.''' ) serve_parser.add_argument('''--workers''' , type=__UpperCAmelCase , default=1 , help='''Number of http workers''' ) serve_parser.add_argument('''--model''' , type=__UpperCAmelCase , help='''Model\'s name or path to stored model.''' ) serve_parser.add_argument('''--config''' , type=__UpperCAmelCase , help='''Model\'s config name or path to stored model.''' ) serve_parser.add_argument('''--tokenizer''' , type=__UpperCAmelCase , help='''Tokenizer name to use.''' ) serve_parser.add_argument( '''--device''' , type=__UpperCAmelCase , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , ) serve_parser.set_defaults(func=__UpperCAmelCase ) def __init__( self : Dict , __UpperCAmelCase : Pipeline , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int ) ->List[Any]: """simple docstring""" a = pipeline a = host a = port a = workers if not _serve_dependencies_installed: raise RuntimeError( '''Using serve command requires FastAPI and uvicorn. 
''' '''Please install transformers with [serving]: pip install "transformers[serving]".''' '''Or install FastAPI and uvicorn separately.''' ) else: logger.info(F"""Serving model over {host}:{port}""" ) a = FastAPI( routes=[ APIRoute( '''/''' , self.model_info , response_model=__UpperCAmelCase , response_class=__UpperCAmelCase , methods=['''GET'''] , ), APIRoute( '''/tokenize''' , self.tokenize , response_model=__UpperCAmelCase , response_class=__UpperCAmelCase , methods=['''POST'''] , ), APIRoute( '''/detokenize''' , self.detokenize , response_model=__UpperCAmelCase , response_class=__UpperCAmelCase , methods=['''POST'''] , ), APIRoute( '''/forward''' , self.forward , response_model=__UpperCAmelCase , response_class=__UpperCAmelCase , methods=['''POST'''] , ), ] , timeout=600 , ) def __lowerCAmelCase ( self : Dict ) ->Union[str, Any]: """simple docstring""" run(self._app , host=self.host , port=self.port , workers=self.workers ) def __lowerCAmelCase ( self : int ) ->Union[str, Any]: """simple docstring""" return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) ) def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : str = Body(__UpperCAmelCase , embed=__UpperCAmelCase ) , __UpperCAmelCase : bool = Body(__UpperCAmelCase , embed=__UpperCAmelCase ) ) ->Tuple: """simple docstring""" try: a = self._pipeline.tokenizer.tokenize(__UpperCAmelCase ) if return_ids: a = self._pipeline.tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) return ServeTokenizeResult(tokens=__UpperCAmelCase , tokens_ids=__UpperCAmelCase ) else: return ServeTokenizeResult(tokens=__UpperCAmelCase ) except Exception as e: raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(__UpperCAmelCase )} ) def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : List[int] = Body(__UpperCAmelCase , embed=__UpperCAmelCase ) , __UpperCAmelCase : bool = Body(__UpperCAmelCase , embed=__UpperCAmelCase ) , __UpperCAmelCase : bool = Body(__UpperCAmelCase , embed=__UpperCAmelCase ) , ) ->Any: """simple docstring""" try: a = self._pipeline.tokenizer.decode(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return ServeDeTokenizeResult(model='''''' , text=__UpperCAmelCase ) except Exception as e: raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(__UpperCAmelCase )} ) async def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int]=Body(__UpperCAmelCase , embed=__UpperCAmelCase ) ) ->Any: """simple docstring""" if len(__UpperCAmelCase ) == 0: return ServeForwardResult(output=[] , attention=[] ) try: # Forward through the model a = self._pipeline(__UpperCAmelCase ) return ServeForwardResult(output=__UpperCAmelCase ) except Exception as e: raise HTTPException(500 , {'''error''': str(__UpperCAmelCase )} )
code_codestyle: 0
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging snake_case : List[str] = logging.get_logger(__name__) snake_case : int = { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'blenderbot-small' SCREAMING_SNAKE_CASE__ = ['past_key_values'] SCREAMING_SNAKE_CASE__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , _lowerCamelCase=5_0265 , _lowerCamelCase=512 , _lowerCamelCase=8 , _lowerCamelCase=2048 , _lowerCamelCase=16 , _lowerCamelCase=8 , _lowerCamelCase=2048 , _lowerCamelCase=16 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="gelu" , _lowerCamelCase=512 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=1 , _lowerCamelCase=False , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=2 , **_lowerCamelCase , ): a :Dict = vocab_size a :Optional[Any] = max_position_embeddings a :str = d_model a :Any = encoder_ffn_dim a :Optional[int] = encoder_layers a :List[str] = encoder_attention_heads a :List[str] = decoder_ffn_dim a :Optional[int] = decoder_layers a :str = decoder_attention_heads a :List[str] = dropout a :Optional[int] = attention_dropout a :Dict = activation_dropout a :List[str] = activation_function a :List[Any] = init_std a :Optional[int] = encoder_layerdrop a :Tuple = decoder_layerdrop a :List[str] = use_cache a :int = encoder_layers a :Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , forced_eos_token_id=_lowerCamelCase , **_lowerCamelCase , ) class _snake_case ( _snake_case ): @property def SCREAMING_SNAKE_CASE__ ( self ): if self.task in ["default", "seq2seq-lm"]: a :Optional[Any] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: a :Union[str, Any] = {0: '''batch'''} a :Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: a :Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''} a :str = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(_lowerCamelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
a :Optional[int] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: a , a :str = self.num_layers for i in range(_lowerCamelCase ): a :List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} a :List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''} else: a :Optional[int] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def SCREAMING_SNAKE_CASE__ ( self ): if self.task in ["default", "seq2seq-lm"]: a :List[Any] = super().outputs else: a :Union[str, Any] = super(_lowerCamelCase , self ).outputs if self.use_past: a , a :int = self.num_layers for i in range(_lowerCamelCase ): a :int = {0: '''batch''', 2: '''past_sequence + sequence'''} a :Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): a :Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Generate decoder inputs a :Dict = seq_length if not self.use_past else 1 a :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) a :List[Any] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} a :List[str] = dict(**_lowerCamelCase , **_lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch a , a :Optional[Any] = common_inputs['''input_ids'''].shape a :Tuple = common_inputs['''decoder_input_ids'''].shape[1] a , a :List[Any] = self.num_attention_heads a :List[Any] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) a :int = decoder_seq_length + 3 a :Union[str, Any] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) a :Union[str, Any] = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase )] , dim=1 ) a :List[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered a , a :Optional[int] = self.num_layers a :str = min(_lowerCamelCase , _lowerCamelCase ) a :str = max(_lowerCamelCase , _lowerCamelCase ) - min_num_layers a :Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(_lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase ), ) ) # TODO: test this. 
a :int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(_lowerCamelCase , _lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): a :Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch a , a :Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values a :Optional[int] = seqlen + 2 a , a :Union[str, Any] = self.num_layers a , a :Optional[Any] = self.num_attention_heads a :str = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) a :Tuple = common_inputs['''attention_mask'''].dtype a :Any = torch.cat( [common_inputs['''attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase , dtype=_lowerCamelCase )] , dim=1 ) a :Any = [ (torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) for _ in range(_lowerCamelCase ) ] return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX a :Optional[Any] = compute_effective_axis_dimension( _lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX a :Optional[int] = tokenizer.num_special_tokens_to_add(_lowerCamelCase ) a :Tuple = compute_effective_axis_dimension( _lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence a :List[str] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size a :Dict = dict(tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase ) ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ): if self.task in ["default", "seq2seq-lm"]: a :Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase ) elif self.task == "causal-lm": a :Dict = self._generate_dummy_inputs_for_causal_lm( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase ) else: a :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if self.task in ["default", "seq2seq-lm"]: a :Optional[int] = 
super()._flatten_past_key_values_(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else: a :Any = super(_lowerCamelCase , self )._flatten_past_key_values_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
style_context_codestyle: 94
label: 0
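The style_context in the row above builds dummy past_key_values for ONNX export of BlenderbotSmall; each layer caches a key and a value tensor of shape (batch, num_heads, sequence_length, hidden_size // num_heads). A quick shape check with torch, where every size is made up purely for illustration:

import torch

batch, num_heads, past_len, hidden_size, num_layers = 2, 16, 7, 512, 8  # illustrative sizes
shape = (batch, num_heads, past_len, hidden_size // num_heads)
# One (key, value) pair of zeros per decoder layer, as in the dummy-input generator above.
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]
print(past_key_values[0][0].shape)  # torch.Size([2, 16, 7, 32])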
'''simple docstring''' import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging SCREAMING_SNAKE_CASE_: Any =logging.get_logger(__name__) class __A ( UpperCamelCase__ ): def __init__(self : Tuple , __a : Union[List[ControlNetModel], Tuple[ControlNetModel]] ): super().__init__() UpperCAmelCase_ = nn.ModuleList(__a ) def _lowercase (self : Tuple , __a : torch.FloatTensor , __a : Union[torch.Tensor, float, int] , __a : torch.Tensor , __a : List[torch.tensor] , __a : List[float] , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , __a : Optional[Dict[str, Any]] = None , __a : bool = False , __a : bool = True , ): for i, (image, scale, controlnet) in enumerate(zip(__a , __a , self.nets ) ): UpperCAmelCase_ , UpperCAmelCase_ = controlnet( __a , __a , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) # merge samples if i == 0: UpperCAmelCase_ , UpperCAmelCase_ = down_samples, mid_sample else: UpperCAmelCase_ = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__a , __a ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def _lowercase (self : List[str] , __a : Union[str, os.PathLike] , __a : bool = True , __a : Callable = None , __a : bool = False , __a : Optional[str] = None , ): UpperCAmelCase_ = 0 UpperCAmelCase_ = save_directory for controlnet in self.nets: controlnet.save_pretrained( __a , is_main_process=__a , save_function=__a , safe_serialization=__a , variant=__a , ) idx += 1 UpperCAmelCase_ = model_path_to_save + f"""_{idx}""" @classmethod def _lowercase (cls : Tuple , __a : Optional[Union[str, os.PathLike]] , **__a : List[Any] ): UpperCAmelCase_ = 0 UpperCAmelCase_ = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... UpperCAmelCase_ = pretrained_model_path while os.path.isdir(__a ): UpperCAmelCase_ = ControlNetModel.from_pretrained(__a , **__a ) controlnets.append(__a ) idx += 1 UpperCAmelCase_ = pretrained_model_path + f"""_{idx}""" logger.info(f"""{len(__a )} controlnets loaded from {pretrained_model_path}.""" ) if len(__a ) == 0: raise ValueError( f"""No ControlNets found under {os.path.dirname(__a )}. Expected at least {pretrained_model_path + "_0"}.""" ) return cls(__a )
code_codestyle: 1
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers snake_case : Union[str, Any] = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('''dataclasses''') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('''importlib_metadata''') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=None ): """simple docstring""" require_version(deps[pkg] , UpperCAmelCase_ )
style_context_codestyle: 94
label: 0
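The code column in the row above (a MultiControlNetModel) merges the outputs of several ControlNets by element-wise summing each successive net's per-block residuals onto the first net's. The merge rule in isolation, with plain floats standing in for tensors:

def merge_controlnet_outputs(per_net_outputs):
    # per_net_outputs: list of (down_samples, mid_sample) pairs, one per controlnet.
    down_total, mid_total = per_net_outputs[0]
    for down, mid in per_net_outputs[1:]:
        down_total = [prev + curr for prev, curr in zip(down_total, down)]
        mid_total = mid_total + mid
    return down_total, mid_total

print(merge_controlnet_outputs([([1.0, 2.0], 3.0), ([0.5, 0.5], 1.0)]))  # ([1.5, 2.5], 4.0)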
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging lowerCamelCase : Optional[int] = logging.get_logger(__name__) lowerCamelCase : Union[str, Any] = {'vocab_file': 'spiece.model'} lowerCamelCase : int = { 'vocab_file': { 'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model', } } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str]=False , UpperCamelCase : Tuple=True , UpperCamelCase : Optional[int]=False , UpperCamelCase : str="<s>" , UpperCamelCase : Optional[Any]="</s>" , UpperCamelCase : Any="<unk>" , UpperCamelCase : Tuple="<sep>" , UpperCamelCase : Union[str, Any]="<pad>" , UpperCamelCase : Any="<cls>" , UpperCamelCase : Tuple="<mask>" , UpperCamelCase : Any=["<eop>", "<eod>"] , UpperCamelCase : Optional[Dict[str, Any]] = None , **UpperCamelCase : Optional[int] , ): '''simple docstring''' lowercase__ = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCamelCase , remove_space=UpperCamelCase , keep_accents=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , additional_special_tokens=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase , ) lowercase__ = 3 lowercase__ = do_lower_case lowercase__ = remove_space lowercase__ = keep_accents lowercase__ = vocab_file lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( '''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. 
''' '''See https://pypi.org/project/jieba/ for installation.''' ) lowercase__ = jieba lowercase__ = str.maketrans(''' \n''' , '''\u2582\u2583''' ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def UpperCamelCase__ (self : Dict ): '''simple docstring''' return len(self.sp_model ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__(self : str ): '''simple docstring''' lowercase__ = self.__dict__.copy() lowercase__ = None return state def __setstate__(self : Dict , UpperCamelCase : int ): '''simple docstring''' lowercase__ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase__ = {} lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Optional[Any] ): '''simple docstring''' if self.remove_space: lowercase__ = ''' '''.join(inputs.strip().split() ) else: lowercase__ = inputs lowercase__ = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' ) if not self.keep_accents: lowercase__ = unicodedata.normalize('''NFKD''' , UpperCamelCase ) lowercase__ = ''''''.join([c for c in outputs if not unicodedata.combining(UpperCamelCase )] ) if self.do_lower_case: lowercase__ = outputs.lower() return outputs def UpperCamelCase__ (self : List[str] , UpperCamelCase : str ): '''simple docstring''' lowercase__ = self.preprocess_text(UpperCamelCase ) lowercase__ = self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase ) lowercase__ = [] for piece in pieces: if len(UpperCamelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit(): lowercase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase , '''''' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowercase__ = cur_pieces[1:] else: lowercase__ = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCamelCase ) else: new_pieces.append(UpperCamelCase ) return new_pieces def UpperCamelCase__ (self : Dict , UpperCamelCase : Optional[int] ): '''simple docstring''' return self.sp_model.PieceToId(UpperCamelCase ) def UpperCamelCase__ (self : int , UpperCamelCase : Any ): '''simple docstring''' return self.sp_model.IdToPiece(UpperCamelCase ) def UpperCamelCase__ (self : List[str] , UpperCamelCase : Optional[int] ): '''simple docstring''' lowercase__ = ''''''.join(UpperCamelCase ).replace(UpperCamelCase , ''' ''' ).strip() return out_string def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def UpperCamelCase__ (self : int , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase ) if token_ids_a is not None: return ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1, 1] return ([0] * len(UpperCamelCase )) + [1, 1] def UpperCamelCase__ (self : str , 
UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = [self.sep_token_id] lowercase__ = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def UpperCamelCase__ (self : Dict , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(UpperCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowercase__ = os.path.join( UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase , '''wb''' ) as fi: lowercase__ = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase ) return (out_vocab_file,) def UpperCamelCase__ (self : Optional[Any] , *UpperCamelCase : int , **UpperCamelCase : List[str] ): '''simple docstring''' lowercase__ = super()._decode(*UpperCamelCase , **UpperCamelCase ) lowercase__ = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' ) return text
code_codestyle: 2
from __future__ import annotations import collections import pprint from pathlib import Path def __lowerCamelCase ( UpperCAmelCase_ : str ): """simple docstring""" return "".join(sorted(UpperCAmelCase_ ) ) def __lowerCamelCase ( UpperCAmelCase_ : str ): """simple docstring""" return word_by_signature[signature(UpperCAmelCase_ )] snake_case : str = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''') snake_case : Optional[int] = sorted({word.strip().lower() for word in data.splitlines()}) snake_case : str = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": snake_case : Optional[int] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('''anagrams.txt''', '''w''') as file: file.write('''all_anagrams = \n ''') file.write(pprint.pformat(all_anagrams))
style_context_codestyle: 94
label: 0
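The style_context in the row above groups a word list into anagram classes by using the sorted letters of each word as a dictionary key. The same idea in a few lines, with a small inline sample in place of words.txt:

import collections

words = ["listen", "silent", "enlist", "google", "banana"]
word_by_signature = collections.defaultdict(list)
for word in words:
    # Words with the same sorted-letter signature are anagrams of each other.
    word_by_signature["".join(sorted(word))].append(word)

print(word_by_signature["".join(sorted("listen"))])  # ['listen', 'silent', 'enlist']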
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class A ( __snake_case ): __magic_name__ = ['''image_processor''', '''tokenizer'''] __magic_name__ = '''ViTImageProcessor''' __magic_name__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Optional[Any] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , SCREAMING_SNAKE_CASE , ) A : int = kwargs.pop('''feature_extractor''' ) A : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" if text is None and visual_prompt is None and images is None: raise ValueError('''You have to specify either text, visual prompt or images.''' ) if text is not None and visual_prompt is not None: raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' ) if text is not None: A : str = self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if visual_prompt is not None: A : str = self.image_processor(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if images is not None: A : Any = self.image_processor(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if visual_prompt is not None and images is not None: A : Optional[int] = { '''pixel_values''': image_features.pixel_values, '''conditional_pixel_values''': prompt_features.pixel_values, } return encoding elif text is not None and images is not None: A : List[Any] = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: A : int = { '''conditional_pixel_values''': prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE ) , tensor_type=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return self.tokenizer.decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @property def __lowerCAmelCase ( self ) -> str: """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , SCREAMING_SNAKE_CASE , ) return self.image_processor
code_codestyle: 3
import string import numpy def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ): """simple docstring""" return b if a == 0 else greatest_common_divisor(b % a , UpperCAmelCase_ ) class _snake_case : SCREAMING_SNAKE_CASE__ = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) SCREAMING_SNAKE_CASE__ = numpy.vectorize(lambda _snake_case : x % 36 ) SCREAMING_SNAKE_CASE__ = numpy.vectorize(_snake_case ) def __init__( self , _lowerCamelCase ): a :List[Any] = self.modulus(_lowerCamelCase ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key a :int = encrypt_key.shape[0] def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): return self.key_string.index(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): return self.key_string[round(_lowerCamelCase )] def SCREAMING_SNAKE_CASE__ ( self ): a :str = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: a :Any = det % len(self.key_string ) a :Dict = len(self.key_string ) if greatest_common_divisor(_lowerCamelCase , len(self.key_string ) ) != 1: a :int = ( F'''determinant modular {req_l} of encryption key({det}) ''' F'''is not co prime w.r.t {req_l}.\nTry another key.''' ) raise ValueError(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :Optional[Any] = [char for char in text.upper() if char in self.key_string] a :List[str] = chars[-1] while len(_lowerCamelCase ) % self.break_key != 0: chars.append(_lowerCamelCase ) return "".join(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :Dict = self.process_text(text.upper() ) a :List[str] = '''''' for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ): a :int = text[i : i + self.break_key] a :Optional[int] = [self.replace_letters(_lowerCamelCase ) for char in batch] a :Union[str, Any] = numpy.array([vec] ).T a :str = self.modulus(self.encrypt_key.dot(_lowerCamelCase ) ).T.tolist()[ 0 ] a :List[Any] = ''''''.join( self.replace_digits(_lowerCamelCase ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: a :int = det % len(self.key_string ) a :Tuple = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: a :Tuple = i break a :List[str] = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(_lowerCamelCase ) ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :List[Any] = self.make_decrypt_key() a :str = self.process_text(text.upper() ) a :List[Any] = '''''' for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ): a :Optional[Any] = text[i : i + self.break_key] a :List[Any] = [self.replace_letters(_lowerCamelCase ) for char in batch] a :str = numpy.array([vec] ).T a :Dict = self.modulus(decrypt_key.dot(_lowerCamelCase ) ).T.tolist()[0] a :List[Any] = ''''''.join( self.replace_digits(_lowerCamelCase ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def __lowerCamelCase ( ): """simple docstring""" a :Tuple = int(input('''Enter the order of the encryption key: ''' ) ) a :Dict = [] print('''Enter each row of the encryption key with space separated integers''' ) for _ in range(UpperCAmelCase_ ): a :List[str] = [int(UpperCAmelCase_ ) for x in 
input().split()] hill_matrix.append(UpperCAmelCase_ ) a :Any = HillCipher(numpy.array(UpperCAmelCase_ ) ) print('''Would you like to encrypt or decrypt some text? (1 or 2)''' ) a :Any = input('''\n1. Encrypt\n2. Decrypt\n''' ) if option == "1": a :str = input('''What text would you like to encrypt?: ''' ) print('''Your encrypted text is:''' ) print(hc.encrypt(UpperCAmelCase_ ) ) elif option == "2": a :Dict = input('''What text would you like to decrypt?: ''' ) print('''Your decrypted text is:''' ) print(hc.decrypt(UpperCAmelCase_ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
style_context_codestyle: 94
label: 0
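The style_context in the row above is a Hill cipher over a 36-character alphabet (A-Z plus digits), which requires the key matrix determinant to be coprime with 36 so the key is invertible mod 36. A tiny worked check of that invertibility condition; the key matrix is chosen for illustration, and the determinant is reduced mod 36 here regardless of sign, a slight simplification of the check above:

import math
import numpy

key = numpy.array([[2, 5], [1, 6]])
det = round(numpy.linalg.det(key)) % 36  # 2*6 - 5*1 = 7
print(det, math.gcd(det, 36) == 1)  # 7 True -> usable as an encryption key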
'''simple docstring''' import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( """The `inpainting.py` script is outdated. Please use directly `from diffusers import""" """ StableDiffusionInpaintPipeline` instead.""" )
code_codestyle: 4
from __future__ import annotations def __lowerCamelCase ( UpperCAmelCase_ : dict , UpperCAmelCase_ : str ): """simple docstring""" a , a :Optional[Any] = set(UpperCAmelCase_ ), [start] while stack: a :Optional[int] = stack.pop() explored.add(UpperCAmelCase_ ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(UpperCAmelCase_ ) return explored snake_case : Optional[int] = { '''A''': ['''B''', '''C''', '''D'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F'''], '''D''': ['''B''', '''D'''], '''E''': ['''B''', '''F'''], '''F''': ['''C''', '''E''', '''G'''], '''G''': ['''F'''], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, '''A'''))
style_context_codestyle: 94
label: 0
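The style_context in the row above is an iterative depth-first search; popping from the end of the list and pushing neighbours in reversed order makes the traversal visit them in the same order a recursive DFS would. A compact sketch of the same logic with readable names:

def depth_first_search(graph: dict, start: str) -> set:
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()          # pop the last element (LIFO), unlike BFS
        explored.add(v)
        for adj in reversed(graph[v]):  # reversed so left-most neighbour is popped first
            if adj not in explored:
                stack.append(adj)
    return explored

print(sorted(depth_first_search({"A": ["B", "C"], "B": ["D"], "C": [], "D": []}, "A")))
# ['A', 'B', 'C', 'D']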
from .imports import is_rich_available if is_rich_available(): from rich.traceback import install install(show_locals=False) else: raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
code_codestyle: 5
import math class _snake_case : def __init__( self , _lowerCamelCase=0 ): # a graph with Node 0,1,...,N-1 a :Optional[int] = n a :Union[str, Any] = [ [math.inf for j in range(0 , _lowerCamelCase )] for i in range(0 , _lowerCamelCase ) ] # adjacency matrix for weight a :List[Any] = [ [math.inf for j in range(0 , _lowerCamelCase )] for i in range(0 , _lowerCamelCase ) ] # dp[i][j] stores minimum distance from i to j def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :Tuple = w def SCREAMING_SNAKE_CASE__ ( self ): for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): a :Union[str, Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ): return self.dp[u][v] if __name__ == "__main__": snake_case : str = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
style_context_codestyle: 94
label: 0
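The style_context in the row above is Floyd-Warshall all-pairs shortest paths: dp[i][j] is relaxed through every intermediate vertex k, in O(n**3) time. A function-style sketch over an adjacency matrix:

import math

def floyd_warshall(w):
    # w: n x n adjacency matrix with math.inf for missing edges.
    n = len(w)
    dp = [row[:] for row in w]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j])
    return dp

INF = math.inf
w = [
    [0, 3, INF],
    [INF, 0, 1],
    [2, INF, 0],
]
print(floyd_warshall(w)[0][2])  # 4: the path 0 -> 1 -> 2 costs 3 + 1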
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A : List[Any] = logging.get_logger(__name__) A : Tuple = { 'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json', } class __A( a , a ): snake_case_ = '''resnet''' snake_case_ = ['''basic''', '''bottleneck'''] def __init__( self , _snake_case=3 , _snake_case=64 , _snake_case=[256, 512, 1_024, 2_048] , _snake_case=[3, 4, 6, 3] , _snake_case="bottleneck" , _snake_case="relu" , _snake_case=False , _snake_case=None , _snake_case=None , **_snake_case , ) -> Optional[Any]: '''simple docstring''' super().__init__(**_snake_case ) if layer_type not in self.layer_types: raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" ) __a = num_channels __a = embedding_size __a = hidden_sizes __a = depths __a = layer_type __a = hidden_act __a = downsample_in_first_stage __a = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(_snake_case ) + 1 )] __a , __a = get_aligned_output_features_output_indices( out_features=_snake_case , out_indices=_snake_case , stage_names=self.stage_names ) class __A( a ): snake_case_ = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> float: '''simple docstring''' return 1E-3
code_codestyle: 6
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name snake_case : Union[str, Any] = ''' Examples: ```py >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") >>> pipe_prior.to("cuda") >>> prompt = "red cat, 4k photo" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> zero_image_emb = out.negative_image_embeds >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") >>> pipe.to("cuda") >>> image = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=50, ... ).images >>> image[0].save("cat.png") ``` ''' def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str]=8 ): """simple docstring""" a :List[str] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 a :int = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class _snake_case ( _snake_case ): def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ): super().__init__() self.register_modules( unet=_lowerCamelCase , scheduler=_lowerCamelCase , movq=_lowerCamelCase , ) a :Any = 2 ** (len(self.movq.config.block_out_channels ) - 1) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if latents is None: a :str = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase , device=_lowerCamelCase , dtype=_lowerCamelCase ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) a :Any = latents.to(_lowerCamelCase ) a :Dict = latents * scheduler.init_noise_sigma return latents def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) a :int = torch.device(F'''cuda:{gpu_id}''' ) a :int = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=0 ): if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) a :Any = torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=_lowerCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) a :Tuple = None for cpu_offloaded_model in [self.unet, self.movq]: a , a :List[str] = cpu_offload_with_hook(_lowerCamelCase , _lowerCamelCase , prev_module_hook=_lowerCamelCase ) # We'll offload the last model manually. 
a :str = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def SCREAMING_SNAKE_CASE__ ( self ): if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(_lowerCamelCase , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_lowerCamelCase ) def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 100 , _lowerCamelCase = 4.0 , _lowerCamelCase = 1 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , ): a :int = self._execution_device a :Optional[Any] = guidance_scale > 1.0 if isinstance(_lowerCamelCase , _lowerCamelCase ): a :Union[str, Any] = torch.cat(_lowerCamelCase , dim=0 ) a :Any = image_embeds.shape[0] * num_images_per_prompt if isinstance(_lowerCamelCase , _lowerCamelCase ): a :List[str] = torch.cat(_lowerCamelCase , dim=0 ) if do_classifier_free_guidance: a :Union[str, Any] = image_embeds.repeat_interleave(_lowerCamelCase , dim=0 ) a :Optional[int] = negative_image_embeds.repeat_interleave(_lowerCamelCase , dim=0 ) a :Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCamelCase ) self.scheduler.set_timesteps(_lowerCamelCase , device=_lowerCamelCase ) a :Optional[Any] = self.scheduler.timesteps a :List[str] = self.unet.config.in_channels a , a :str = downscale_height_and_width(_lowerCamelCase , _lowerCamelCase , self.movq_scale_factor ) # create initial latent a :int = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(_lowerCamelCase ) ): # expand the latents if we are doing classifier free guidance a :Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a :Union[str, Any] = {'''image_embeds''': image_embeds} a :Optional[Any] = self.unet( sample=_lowerCamelCase , timestep=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , added_cond_kwargs=_lowerCamelCase , return_dict=_lowerCamelCase , )[0] if do_classifier_free_guidance: a , a :Any = noise_pred.split(latents.shape[1] , dim=1 ) a , a :List[str] = noise_pred.chunk(2 ) a , a :int = variance_pred.chunk(2 ) a :List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) a :Optional[int] = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): a , a :Tuple = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 a :int = self.scheduler.step( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase , )[0] # post-processing a :int = self.movq.decode(_lowerCamelCase , force_not_quantize=_lowerCamelCase )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: a :str = image * 0.5 + 0.5 a :List[Any] = image.clamp(0 , 1 ) a :str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if 
output_type == "pil": a :str = self.numpy_to_pil(_lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_lowerCamelCase )
style_context_codestyle: 94
label: 0
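The style_context in the row above (a Kandinsky 2.2 decoder pipeline) sizes its latents with downscale_height_and_width, which rounds each side up to a whole number of scale_factor**2 pixel blocks before rescaling. The per-side rule in isolation; the function name here is descriptive, not the original:

def latent_side(side: int, scale_factor: int = 8) -> int:
    # Round the pixel dimension up to a whole number of scale_factor**2 blocks.
    cells = side // scale_factor**2
    if side % scale_factor**2 != 0:
        cells += 1
    return cells * scale_factor

print(latent_side(768))  # 96: 768 is already a multiple of 64, so 12 blocks * 8
print(latent_side(700))  # 88: 700 rounds up to 11 blocks of 64 px, so 11 * 8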
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = (DPMSolverSinglestepScheduler,) lowerCamelCase = (('num_inference_steps', 25),) def snake_case__ ( self : Tuple,**lowercase_ : Dict )-> Optional[int]: '''simple docstring''' A__ = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, 'prediction_type': 'epsilon', 'thresholding': False, 'sample_max_value': 1.0, 'algorithm_type': 'dpmsolver++', 'solver_type': 'midpoint', 'lambda_min_clipped': -float('inf' ), 'variance_type': None, } config.update(**lowercase_ ) return config def snake_case__ ( self : str,lowercase_ : Optional[Any]=0,**lowercase_ : Any )-> List[Any]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop('num_inference_steps',lowercase_ ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config(**lowercase_ ) A__ = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) A__ = scheduler_class.from_pretrained(lowercase_ ) new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ , A__ = sample, sample for t in range(lowercase_,time_step + scheduler.config.solver_order + 1 ): A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self : List[str] )-> List[Any]: '''simple docstring''' pass def snake_case__ ( self : Tuple,lowercase_ : Union[str, Any]=0,**lowercase_ : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop('num_inference_steps',lowercase_ ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config() A__ = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals (must be after setting timesteps) A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) A__ = scheduler_class.from_pretrained(lowercase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residual (must be after setting timesteps) A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self : Optional[Any],lowercase_ : Optional[int]=None,**lowercase_ : int )-> int: '''simple docstring''' if scheduler is None: A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase_ ) 
A__ = scheduler_class(**lowercase_ ) A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase_ ) A__ = scheduler_class(**lowercase_ ) A__ = 1_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample return sample def snake_case__ ( self : Any )-> str: '''simple docstring''' A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) A__ = 5_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_574 ) < 1E-3 def snake_case__ ( self : Optional[Any] )-> List[Any]: '''simple docstring''' for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowercase_ ) def snake_case__ ( self : int )-> Optional[Any]: '''simple docstring''' A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) A__ = self.full_loop(scheduler=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 A__ = DEISMultistepScheduler.from_config(scheduler.config ) A__ = DPMSolverMultistepScheduler.from_config(scheduler.config ) A__ = UniPCMultistepScheduler.from_config(scheduler.config ) A__ = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A__ = self.full_loop(scheduler=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def snake_case__ ( self : Tuple )-> Any: '''simple docstring''' self.check_over_configs(thresholding=lowercase_ ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowercase_,prediction_type=lowercase_,sample_max_value=lowercase_,algorithm_type='dpmsolver++',solver_order=lowercase_,solver_type=lowercase_,) def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def snake_case__ ( self : Dict )-> List[Any]: '''simple docstring''' for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,) A__ = self.full_loop( solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,) assert not torch.isnan(lowercase_ ).any(), "Samples have nan numbers" def snake_case__ ( self : Optional[int] )-> Tuple: '''simple docstring''' self.check_over_configs(lower_order_final=lowercase_ ) self.check_over_configs(lower_order_final=lowercase_ ) def snake_case__ ( self : Tuple )-> Optional[int]: '''simple docstring''' self.check_over_configs(lambda_min_clipped=-float('inf' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def snake_case__ ( self : Optional[Any] )-> Tuple: '''simple docstring''' self.check_over_configs(variance_type=lowercase_ ) self.check_over_configs(variance_type='learned_range' ) def snake_case__ ( self : str )-> Any: '''simple 
docstring''' for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=lowercase_,time_step=0 ) def snake_case__ ( self : Tuple )-> Tuple: '''simple docstring''' A__ = self.full_loop() A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def snake_case__ ( self : Any )-> Union[str, Any]: '''simple docstring''' A__ = self.full_loop(use_karras_sigmas=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_248 ) < 1E-3 def snake_case__ ( self : Union[str, Any] )-> Tuple: '''simple docstring''' A__ = self.full_loop(prediction_type='v_prediction' ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.1_453 ) < 1E-3 def snake_case__ ( self : Tuple )-> int: '''simple docstring''' A__ = self.full_loop(prediction_type='v_prediction',use_karras_sigmas=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.0_649 ) < 1E-3 def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(thresholding=lowercase_,dynamic_thresholding_ratio=0 ) A__ = scheduler_class(**lowercase_ ) A__ = 1_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter.half() scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample assert sample.dtype == torch.floataa
7
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''')
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
94
0
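# --- Illustrative sketch (not part of the original test file) ---
# A minimal version of the loop the DPMSolverSinglestepScheduler tests above
# exercise, including the save_config/from_pretrained round trip they check.
# The torch.randn_like call is a stand-in for a real UNet forward pass.
import tempfile

import torch
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000, solver_order=2)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)  # stand-in latent
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample

with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)
    restored = DPMSolverSinglestepScheduler.from_pretrained(tmpdirname)
    assert restored.config.solver_order == 2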
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging lowerCAmelCase_ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt'''] lowerCAmelCase_ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('''0.9.0'''): raise Exception('''requires fairseq >= 0.9.0''') logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = ''' Hello world! cécé herlolip''' lowerCAmelCase_ = [ ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''), ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''), ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''), ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''), ] def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', ] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = dct.pop(SCREAMING_SNAKE_CASE__ ) snake_case_ = val def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = torch.load(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' ) snake_case_ = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval() hub_interface.model.load_state_dict(sd['''model'''] ) return hub_interface def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_, snake_case_ = emb.weight.shape snake_case_ = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ ) snake_case_ = emb.weight.data return lin_layer @torch.no_grad() def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ): if not os.path.exists(SCREAMING_SNAKE_CASE__ ): snake_case_ = torch.hub.load('''pytorch/fairseq''' , SCREAMING_SNAKE_CASE__ ).eval() else: snake_case_ = load_xsum_checkpoint(SCREAMING_SNAKE_CASE__ ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: snake_case_ = checkpoint_path.replace('''.''' , '''-''' ) snake_case_ = BartConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) snake_case_ = bart.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ) snake_case_ = BartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ).encode(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).unsqueeze(0 ) if not torch.eq(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).all(): raise ValueError( F'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' ) if checkpoint_path == "bart.large.mnli": snake_case_ = bart.state_dict() remove_ignore_keys_(SCREAMING_SNAKE_CASE__ ) snake_case_ = state_dict['''model.decoder.embed_tokens.weight'''] for src, dest in mnli_rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ = BartForSequenceClassification(SCREAMING_SNAKE_CASE__ ).eval() model.load_state_dict(SCREAMING_SNAKE_CASE__ ) snake_case_ = bart.predict('''mnli''' , 
SCREAMING_SNAKE_CASE__ , return_logits=SCREAMING_SNAKE_CASE__ ) snake_case_ = model(SCREAMING_SNAKE_CASE__ )[0] # logits else: # no classification heads to worry about snake_case_ = bart.model.state_dict() remove_ignore_keys_(SCREAMING_SNAKE_CASE__ ) snake_case_ = state_dict['''decoder.embed_tokens.weight'''] snake_case_ = bart.extract_features(SCREAMING_SNAKE_CASE__ ) if hf_checkpoint_name == "facebook/bart-large": snake_case_ = BartModel(SCREAMING_SNAKE_CASE__ ).eval() model.load_state_dict(SCREAMING_SNAKE_CASE__ ) snake_case_ = model(SCREAMING_SNAKE_CASE__ ).model[0] else: snake_case_ = BartForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() # an existing summarization ckpt model.model.load_state_dict(SCREAMING_SNAKE_CASE__ ) if hasattr(SCREAMING_SNAKE_CASE__ , '''lm_head''' ): snake_case_ = make_linear_from_emb(model.model.shared ) snake_case_ = model.model(SCREAMING_SNAKE_CASE__ )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' ) Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum''' ) lowerCAmelCase_ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
8
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter snake_case : int = '''Create a default config file for Accelerate with only a few flags set.''' def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any]="no" , UpperCAmelCase_ : str = default_json_config_file , UpperCAmelCase_ : bool = False ): """simple docstring""" a :List[str] = Path(UpperCAmelCase_ ) path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) if path.exists(): print( F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' ) return False a :Optional[Any] = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' ) a :List[Any] = { '''compute_environment''': '''LOCAL_MACHINE''', '''mixed_precision''': mixed_precision, } if torch.cuda.is_available(): a :Dict = torch.cuda.device_count() a :Tuple = num_gpus a :int = False if num_gpus > 1: a :str = '''MULTI_GPU''' else: a :List[Any] = '''NO''' elif is_xpu_available() and use_xpu: a :List[Any] = torch.xpu.device_count() a :Optional[int] = num_xpus a :List[Any] = False if num_xpus > 1: a :int = '''MULTI_XPU''' else: a :str = '''NO''' elif is_npu_available(): a :List[str] = torch.npu.device_count() a :Any = num_npus a :Optional[int] = False if num_npus > 1: a :List[str] = '''MULTI_NPU''' else: a :Dict = '''NO''' else: a :str = 0 a :Optional[Any] = True a :Optional[Any] = 1 a :str = '''NO''' a :List[str] = ClusterConfig(**UpperCAmelCase_ ) config.to_json_file(UpperCAmelCase_ ) return path def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] ): """simple docstring""" a :List[Any] = parser.add_parser('''default''' , parents=UpperCAmelCase_ , help=UpperCAmelCase_ , formatter_class=UpperCAmelCase_ ) parser.add_argument( '''--config_file''' , default=UpperCAmelCase_ , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , dest='''save_location''' , ) parser.add_argument( '''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=UpperCAmelCase_ , help='''Whether or not to use mixed precision training. ''' '''Choose between FP16 and BF16 (bfloat16) training. 
''' '''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , ) parser.set_defaults(func=UpperCAmelCase_ ) return parser def __lowerCamelCase ( UpperCAmelCase_ : int ): """simple docstring""" a :Optional[Any] = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F'''accelerate configuration saved at {config_file}''' )
94
0
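# --- Illustrative sketch (not part of the original conversion script) ---
# The converter above boils down to two pieces of dict surgery on the fairseq
# state dict: popping bookkeeping keys and renaming classification-head keys.
# A self-contained demo of both on a toy dict:
import torch

state_dict = {
    "encoder.version": torch.tensor(1.0),
    "model.classification_heads.mnli.dense.weight": torch.randn(4, 4),
}

for k in ("encoder.version", "decoder.version"):  # remove_ignore_keys_
    state_dict.pop(k, None)

src = "model.classification_heads.mnli.dense.weight"  # rename_key
state_dict["classification_head.dense.weight"] = state_dict.pop(src)

assert sorted(state_dict) == ["classification_head.dense.weight"]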
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _lowercase ( A__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = DebertaTokenizer SCREAMING_SNAKE_CASE__ : Union[str, Any] = True SCREAMING_SNAKE_CASE__ : Optional[int] = DebertaTokenizerFast def __magic_name__( self :List[str] ) -> str: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] __SCREAMING_SNAKE_CASE : Optional[int] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) __SCREAMING_SNAKE_CASE : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __SCREAMING_SNAKE_CASE : Union[str, Any] = {'''unk_token''': '''[UNK]'''} __SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowerCAmelCase__ ) ) def __magic_name__( self :Any , **lowerCAmelCase__ :str ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def __magic_name__( self :int , lowerCAmelCase__ :int ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : Optional[int] = '''lower newer''' __SCREAMING_SNAKE_CASE : int = '''lower newer''' return input_text, output_text def __magic_name__( self :str ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Tuple = '''lower newer''' __SCREAMING_SNAKE_CASE : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token] __SCREAMING_SNAKE_CASE : Optional[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : List[str] = tokenizer('''Hello''' , '''World''' ) __SCREAMING_SNAKE_CASE : Dict = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , lowerCAmelCase__ ) @slow def __magic_name__( self :Optional[int] ) -> List[Any]: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __SCREAMING_SNAKE_CASE : int = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = 
tokenizer.encode( '''sequence builders''' , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def __magic_name__( self :Any ) -> Dict: __SCREAMING_SNAKE_CASE : Tuple = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __SCREAMING_SNAKE_CASE : Dict = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = [tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) for seq in encoding['''input_ids''']] # fmt: off __SCREAMING_SNAKE_CASE : List[Any] = { '''input_ids''': [ [1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on __SCREAMING_SNAKE_CASE : List[str] = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. 
By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , lowerCAmelCase__ ) for expected, decoded in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
9
import sys snake_case : int = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def __lowerCamelCase ( UpperCAmelCase_ : str = N ): """simple docstring""" a :Optional[Any] = -sys.maxsize - 1 for i in range(len(UpperCAmelCase_ ) - 12 ): a :Dict = 1 for j in range(13 ): product *= int(n[i + j] ) if product > largest_product: a :str = product return largest_product if __name__ == "__main__": print(F"""{solution() = }""")
94
0
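# --- Illustrative sketch (not part of the original snippet) ---
# A cleanly named re-implementation of the Project Euler helper above:
# the greatest product of `window` adjacent digits in a digit string.
def largest_adjacent_product(digits: str, window: int = 13) -> int:
    best = 0
    for i in range(len(digits) - window + 1):
        product = 1
        for ch in digits[i : i + window]:
            product *= int(ch)
        best = max(best, product)
    return best


assert largest_adjacent_product("12345", 2) == 4 * 5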
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "encoder.layer_norm_for_extract": "layer_norm_for_extract", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "label_embs_concat": "label_embeddings_concat", "mask_emb": "masked_spec_embed", "spk_proj": "speaker_proj", } __A = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "label_embeddings_concat", "speaker_proj", "layer_norm_for_extract", ] def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> Optional[Any]: """simple docstring""" for attribute in key.split("." ): lowerCamelCase__: str =getattr(__a , __a ) if weight_type is not None: lowerCamelCase__: Tuple =getattr(__a , __a ).shape else: lowerCamelCase__: List[str] =hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowerCamelCase__: List[Any] =value elif weight_type == "weight_g": lowerCamelCase__: Tuple =value elif weight_type == "weight_v": lowerCamelCase__: List[str] =value elif weight_type == "bias": lowerCamelCase__: List[str] =value else: lowerCamelCase__: str =value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def lowerCAmelCase_ ( __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: Tuple =[] lowerCamelCase__: List[str] =fairseq_model.state_dict() lowerCamelCase__: Any =hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): lowerCamelCase__: Tuple =False if "conv_layers" in name: load_conv_layer( __a , __a , __a , __a , hf_model.config.feat_extract_norm == "group" , ) lowerCamelCase__: Optional[int] =True else: for key, mapped_key in MAPPING.items(): lowerCamelCase__: List[Any] ="unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key): # special case since naming is very similar continue lowerCamelCase__: str =True if "*" in mapped_key: lowerCamelCase__: List[str] =name.split(__a )[0].split("." 
)[-2] lowerCamelCase__: Tuple =mapped_key.replace("*" , __a ) if "weight_g" in name: lowerCamelCase__: str ="weight_g" elif "weight_v" in name: lowerCamelCase__: Union[str, Any] ="weight_v" elif "bias" in name: lowerCamelCase__: Dict ="bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCamelCase__: Dict ="weight" else: lowerCamelCase__: Any =None set_recursively(__a , __a , __a , __a , __a ) continue if not is_used: unused_weights.append(__a ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: Any =full_name.split("conv_layers." )[-1] lowerCamelCase__: Tuple =name.split("." ) lowerCamelCase__: Any =int(items[0] ) lowerCamelCase__: Optional[int] =int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowerCamelCase__: List[Any] =value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowerCamelCase__: Any =value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) lowerCamelCase__: Optional[int] =value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowerCamelCase__: str =value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__a ) @torch.no_grad() def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=True ) -> Optional[Any]: """simple docstring""" if config_path is not None: lowerCamelCase__: Dict =UniSpeechSatConfig.from_pretrained(__a ) else: lowerCamelCase__: Union[str, Any] =UniSpeechSatConfig() lowerCamelCase__: Any ="" if is_finetuned: lowerCamelCase__: Optional[int] =UniSpeechSatForCTC(__a ) else: lowerCamelCase__: Optional[int] =UniSpeechSatForPreTraining(__a ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) lowerCamelCase__: Optional[Any] =model[0].eval() recursively_load_weights(__a , __a ) hf_wavavec.save_pretrained(__a ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") 
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) __A = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
10
import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any]="attention" ): """simple docstring""" a :Optional[int] = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel'''] a :Optional[Any] = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel'''] a :int = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel'''] a :Optional[Any] = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel'''] return k, o, q, v def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int=False ): """simple docstring""" if split_mlp_wi: a :int = params[F'''{prefix}/layers_{i}/mlp/wi_0/kernel'''] a :Optional[Any] = params[F'''{prefix}/layers_{i}/mlp/wi_1/kernel'''] a :Dict = (wi_a, wi_a) else: a :Optional[Any] = params[F'''{prefix}/layers_{i}/mlp/wi/kernel'''] a :Dict = params[F'''{prefix}/layers_{i}/mlp/wo/kernel'''] return wi, wo def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] ): """simple docstring""" return params[F'''{prefix}/layers_{i}/{layer_name}/scale'''] def __lowerCamelCase ( UpperCAmelCase_ : dict , *, UpperCAmelCase_ : int , UpperCAmelCase_ : bool ): """simple docstring""" a :str = traverse_util.flatten_dict(variables['''target'''] ) a :Any = {'''/'''.join(UpperCAmelCase_ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi a :Any = '''encoder/layers_0/mlp/wi_0/kernel''' in old print('''Split MLP:''' , UpperCAmelCase_ ) a :Optional[Any] = collections.OrderedDict() # Shared embeddings. a :Union[str, Any] = old['''token_embedder/embedding'''] # Encoder. for i in range(UpperCAmelCase_ ): # Block i, layer 0 (Self Attention). a :Optional[Any] = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , '''pre_attention_layer_norm''' ) a , a , a , a :Optional[int] = tax_attention_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , '''attention''' ) a :List[Any] = layer_norm a :str = k.T a :Dict = o.T a :int = q.T a :Optional[Any] = v.T # Block i, layer 1 (MLP). a :Tuple = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , '''pre_mlp_layer_norm''' ) a , a :List[Any] = tax_mlp_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''encoder''' , UpperCAmelCase_ ) a :Any = layer_norm if split_mlp_wi: a :Any = wi[0].T a :Tuple = wi[1].T else: a :List[str] = wi.T a :List[Any] = wo.T a :Union[str, Any] = old[ '''encoder/relpos_bias/rel_embedding''' ].T a :Optional[Any] = old['''encoder/encoder_norm/scale'''] if not is_encoder_only: # Decoder. for i in range(UpperCAmelCase_ ): # Block i, layer 0 (Self Attention). a :List[str] = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''pre_self_attention_layer_norm''' ) a , a , a , a :List[Any] = tax_attention_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''self_attention''' ) a :List[Any] = layer_norm a :Tuple = k.T a :int = o.T a :Any = q.T a :Optional[int] = v.T # Block i, layer 1 (Cross Attention). 
a :str = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''pre_cross_attention_layer_norm''' ) a , a , a , a :Any = tax_attention_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''encoder_decoder_attention''' ) a :str = layer_norm a :Optional[Any] = k.T a :Any = o.T a :Dict = q.T a :Optional[Any] = v.T # Block i, layer 2 (MLP). a :Optional[int] = tax_layer_norm_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , '''pre_mlp_layer_norm''' ) a , a :List[Any] = tax_mlp_lookup(UpperCAmelCase_ , UpperCAmelCase_ , '''decoder''' , UpperCAmelCase_ ) a :Optional[int] = layer_norm if split_mlp_wi: a :int = wi[0].T a :Tuple = wi[1].T else: a :str = wi.T a :Dict = wo.T a :Any = old['''decoder/decoder_norm/scale'''] a :Optional[Any] = old[ '''decoder/relpos_bias/rel_embedding''' ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: a :Union[str, Any] = old['''decoder/logits_dense/kernel'''].T return new def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : bool ): """simple docstring""" a :List[Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: a :Optional[Any] = state_dict['''shared.weight'''] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: a :Tuple = state_dict['''shared.weight'''] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('''Using shared word embeddings as lm_head.''' ) a :Optional[Any] = state_dict['''shared.weight'''] return state_dict def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] ): """simple docstring""" a :Tuple = checkpoints.load_tax_checkpoint(UpperCAmelCase_ ) a :Optional[int] = convert_tax_to_pytorch(UpperCAmelCase_ , num_layers=config.num_layers , is_encoder_only=UpperCAmelCase_ ) a :Tuple = make_state_dict(UpperCAmelCase_ , UpperCAmelCase_ ) model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ ) def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ): """simple docstring""" a :List[Any] = TaConfig.from_json_file(UpperCAmelCase_ ) print(F'''Building PyTorch model from configuration: {config}''' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: a :Any = TaEncoderModel(UpperCAmelCase_ ) else: a :List[str] = TaForConditionalGeneration(UpperCAmelCase_ ) # Load weights from tf checkpoint load_tax_weights_in_ta(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(UpperCAmelCase_ ) # Verify that we can load the checkpoint. 
model.from_pretrained(UpperCAmelCase_ ) print('''Done''' ) if __name__ == "__main__": snake_case : Any = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) snake_case : Optional[Any] = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
94
0
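# --- Illustrative sketch (not part of the original converter) ---
# The T5X converter above transposes every kernel (`.T`) because Flax stores
# dense kernels as (in_features, out_features) while torch.nn.Linear stores
# (out_features, in_features). A minimal demonstration:
import numpy as np
import torch

jax_kernel = np.random.randn(512, 1024).astype(np.float32)  # (in, out)

linear = torch.nn.Linear(512, 1024, bias=False)
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(jax_kernel.T))

x = torch.randn(2, 512)
reference = torch.from_numpy(x.numpy() @ jax_kernel)
assert torch.allclose(linear(x), reference, atol=1e-3)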
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ): _A , _A : Any = image.size _A , _A : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 _A : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) _A : Any = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0 _A : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 ) _A : Union[str, Any] = torch.from_numpy(UpperCamelCase__ ) return 2.0 * image - 1.0 class lowerCAmelCase__ ( a): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]: super().__init__() self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase) @torch.no_grad() def __call__( self , __lowerCamelCase = None , __lowerCamelCase = 1 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , ) -> Union[Tuple, ImagePipelineOutput]: if isinstance(__lowerCamelCase , PIL.Image.Image): _A : Tuple = 1 elif isinstance(__lowerCamelCase , torch.Tensor): _A : Union[str, Any] = image.shape[0] else: raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase)}") if isinstance(__lowerCamelCase , PIL.Image.Image): _A : Union[str, Any] = preprocess(__lowerCamelCase) _A , _A : Union[str, Any] = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image _A : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width) _A : str = next(self.unet.parameters()).dtype _A : Union[str, Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase) _A : List[Any] = image.to(device=self.device , dtype=__lowerCamelCase) # set timesteps and move to the correct device self.scheduler.set_timesteps(__lowerCamelCase , device=self.device) _A : Any = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler _A : List[str] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _A : str = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) _A : Optional[int] = {} if accepts_eta: _A : List[Any] = eta for t in self.progress_bar(__lowerCamelCase): # concat latents and low resolution image in the channel dimension. 
_A : List[Any] = torch.cat([latents, image] , dim=1) _A : str = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase) # predict the noise residual _A : Any = self.unet(__lowerCamelCase , __lowerCamelCase).sample # compute the previous noisy sample x_t -> x_t-1 _A : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase).prev_sample # decode the image latents with the VQVAE _A : Union[str, Any] = self.vqvae.decode(__lowerCamelCase).sample _A : Dict = torch.clamp(__lowerCamelCase , -1.0 , 1.0) _A : Tuple = image / 2 + 0.5 _A : int = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": _A : Optional[int] = self.numpy_to_pil(__lowerCamelCase) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCamelCase)
11
def __lowerCamelCase ( UpperCAmelCase_ : int = 100_0000 ):
    """simple docstring"""
    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3 , UpperCAmelCase_ , 2 ) )
    primes.add(2 )
    for p in range(3 , UpperCAmelCase_ , 2 ):
        if p not in primes:
            continue
        # strike out multiples of p, stepping by p
        primes.difference_update(set(range(p * p , UpperCAmelCase_ , p ) ) )
    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over primes p dividing n
    phi = [float(n ) for n in range(UpperCAmelCase_ + 1 )]
    for p in primes:
        for n in range(p , UpperCAmelCase_ + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )


if __name__ == "__main__":
    print(F"""{__lowerCamelCase() = }""")
94
0
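# --- Illustrative sketch (not part of the original pipeline file) ---
# The `preprocess` helper in the pipeline above snaps image dimensions down to
# a multiple of 32 and rescales pixels from [0, 255] to [-1, 1]; a quick check
# of that contract on a random image:
import numpy as np
import PIL.Image

image = PIL.Image.fromarray(np.random.randint(0, 256, (70, 45, 3), dtype=np.uint8))
w, h = (x - x % 32 for x in image.size)  # PIL size is (width, height) = (45, 70)
assert (w, h) == (32, 64)

arr = 2.0 * (np.asarray(image.resize((w, h))).astype(np.float32) / 255.0) - 1.0
assert arr.min() >= -1.0 and arr.max() <= 1.0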
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/s2t-wav2vec2-large-en-de': (
        'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=1_00_00,
        decoder_layers=6,
        decoder_ffn_dim=20_48,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function='relu',
        d_model=2_56,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=10_24,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
12
snake_case : str = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' snake_case : List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] snake_case : int = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
94
0
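# --- Illustrative sketch (not part of the original file) ---
# Minimal round trip for the Speech2Text2Config defined above, using only the
# standard PretrainedConfig API (to_dict / from_dict).
from transformers import Speech2Text2Config

config = Speech2Text2Config(d_model=128, decoder_layers=2)
restored = Speech2Text2Config.from_dict(config.to_dict())
assert restored.d_model == 128 and restored.decoder_layers == 2
assert restored.hidden_size == 128  # resolved through the attribute_map alias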
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def A_ ( _UpperCAmelCase ): if isinstance(_UpperCAmelCase , collections.abc.Iterable ): return x return (x, x) @require_tf class __lowercase : """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : int): pass def _SCREAMING_SNAKE_CASE ( self : str): pass def _SCREAMING_SNAKE_CASE ( self : Dict): pass def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict=None , **lowerCAmelCase__ : Tuple): SCREAMING_SNAKE_CASE_: int = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = TFVisionTextDualEncoderModel(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim)) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=None , **lowerCAmelCase__ : List[str]): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase__ , text_model=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim)) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int]=None , **lowerCAmelCase__ : str): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = {"vision_model": vision_model, "text_model": text_model} SCREAMING_SNAKE_CASE_: Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__) self.assertEqual(output["text_embeds"].shape , 
(input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim)) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any]=None , **lowerCAmelCase__ : Dict): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase__ , text_model=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = TFVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = after_output[0].numpy() SCREAMING_SNAKE_CASE_: Union[str, Any] = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(lowerCAmelCase__ , 1E-5) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : List[Any]): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase__ , text_model=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = model( input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = output.vision_model_output.attentions self.assertEqual(len(lowerCAmelCase__) , vision_config.num_hidden_layers) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE_: List[str] = to_atuple(vision_model.config.image_size) SCREAMING_SNAKE_CASE_: Dict = to_atuple(vision_model.config.patch_size) SCREAMING_SNAKE_CASE_: int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) SCREAMING_SNAKE_CASE_: str = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len)) SCREAMING_SNAKE_CASE_: Dict = output.text_model_output.attentions self.assertEqual(len(lowerCAmelCase__) , text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float): SCREAMING_SNAKE_CASE_: int = np.abs((a - b)).max() self.assertLessEqual(lowerCAmelCase__ , lowerCAmelCase__ , F"Difference between torch and flax is {diff} (>= {tol}).") def _SCREAMING_SNAKE_CASE ( self : Dict): SCREAMING_SNAKE_CASE_: List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_: Tuple = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Dict): 
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_1, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_1(**inputs)
        out_1 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_1.save_pretrained(tmp_dirname)
            model_2 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_2(**inputs)
            out_2 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)


@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all
        # so let's just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DeiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
13
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
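# --- Added usage sketch (illustrative) ---
# A minimal sketch of joint text + audio processing; "laion/clap-htsat-unfused"
# is an assumed public CLAP checkpoint.
from transformers import ClapProcessor
import numpy as np

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
fake_audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at 48 kHz
inputs = processor(text=["a dog barking"], audios=fake_audio, sampling_rate=48_000, return_tensors="pt")
# `inputs` now carries the tokenizer outputs (input_ids, attention_mask)
# alongside the feature-extractor output (input_features), as merged in __call__ above.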
94
0
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
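# --- Added illustration: how `strength` maps onto the denoising schedule ---
# Tracing the arithmetic in get_timesteps above for the default arguments:
#   num_inference_steps = 50, strength = 0.8
#   init_timestep = min(int(50 * 0.8), 50) = 40
#   t_start       = max(50 - 40, 0)        = 10
# so only the last 40 of the 50 scheduler timesteps are run, and the input
# image is noised to timesteps[10] before denoising begins.
num_inference_steps, strength = 50, 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
assert (init_timestep, t_start) == (40, 10)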
14
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=False):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
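# --- Added usage sketch (illustrative): the accumulation pattern these tests exercise ---
# A self-contained toy loop; with gradient_accumulation_steps=2 the optimizer
# step only takes effect every second batch, mirroring `accelerator.accumulate`.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = DataLoader(TensorDataset(torch.randn(16, 4), torch.randn(16, 1)), batch_size=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for x, y in dataloader:
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()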
94
0
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
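# --- Added illustration: the lazy-import pattern used above ---
# A toy re-implementation (for explanation only, not the actual `_LazyModule`):
# attribute access triggers the real submodule import, keeping the top-level
# package import cheap.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map exported attribute -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # imported lazily, only when the attribute is first touched
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)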
15
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional-knapsack: maximize profit for a given weight limit."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        # mark the entry as used so a duplicate ratio resolves to the next occurrence
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
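# --- Added worked example ---
# profits [10, 9, 8], weights [3, 4, 5], capacity 5:
# ratios are [3.33, 2.25, 1.6]; the greedy pass takes all of item 0
# (weight 3, gain 10), then 2/4 of item 1 (gain 4.5), for a total of 14.5.
assert calc_profit([10, 9, 8], [3, 4, 5], 5) == 14.5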
94
0
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def __UpperCAmelCase ( __lowerCamelCase ) -> List[str]: lowercase__ : Optional[int] = FileLock(str(tmpdir / '''foo.lock''' ) ) lowercase__ : List[Any] = FileLock(str(tmpdir / '''foo.lock''' ) ) lowercase__ : int = 0.0_1 with locka.acquire(): with pytest.raises(__lowerCamelCase ): lowercase__ : int = time.time() locka.acquire(__lowerCamelCase ) assert time.time() - _start > timeout def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]: lowercase__ : Any = '''a''' * 10_00 + '''.lock''' lowercase__ : str = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith('''.lock''' ) assert not locka._lock_file.endswith(__lowerCamelCase ) assert len(os.path.basename(locka._lock_file ) ) <= 2_55 lowercase__ : Dict = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(__lowerCamelCase ): locka.acquire(0 )
16
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}


class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
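# --- Added illustration: how the fairseq offset shifts SentencePiece ids ---
# "," is piece id 3 in the spm model but id 4 in the fairseq vocab, hence
# fairseq_offset = 1 plus the special-case table for ids 0..3. A sketch of
# the expected behavior (not executed here; requires the real vocab file):
#   tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   tok.convert_tokens_to_ids("<s>")  -> 0                               (fairseq table)
#   tok.convert_tokens_to_ids(",")    -> tok.sp_model.PieceToId(",") + 1 (offset applied)
#   tok.convert_ids_to_tokens(4)      -> tok.sp_model.IdToPiece(4 - 1)   (offset removed)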
94
0
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def _A ( UpperCamelCase_ : Tuple=None) -> Dict: '''simple docstring''' if subparsers is not None: __lowercase = subparsers.add_parser("env") else: __lowercase = argparse.ArgumentParser("Accelerate env command") parser.add_argument( "--config_file", default=UpperCamelCase_, help="The config file to use for the default values in the launching script.") if subparsers is not None: parser.set_defaults(func=UpperCamelCase_) return parser def _A ( UpperCamelCase_ : Union[str, Any]) -> Tuple: '''simple docstring''' __lowercase = torch.__version__ __lowercase = torch.cuda.is_available() __lowercase = is_xpu_available() __lowercase = is_npu_available() __lowercase = "Not found" # Get the default from the config file. if args.config_file is not None or os.path.isfile(UpperCamelCase_): __lowercase = load_config_from_file(args.config_file).to_dict() __lowercase = { "`Accelerate` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Numpy version": np.__version__, "PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""", "PyTorch XPU available": str(UpperCamelCase_), "PyTorch NPU available": str(UpperCamelCase_), "System RAM": F"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""", } if pt_cuda_available: __lowercase = torch.cuda.get_device_name() print("\nCopy-and-paste the text below in your GitHub issue\n") print("\n".join([F"""- {prop}: {val}""" for prop, val in info.items()])) print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:") __lowercase = ( "\n".join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()]) if isinstance(UpperCamelCase_, UpperCamelCase_) else F"""\t{accelerate_config}""" ) print(UpperCamelCase_) __lowercase = accelerate_config return info def _A ( ) -> int: '''simple docstring''' __lowercase = env_command_parser() __lowercase = parser.parse_args() env_command(UpperCamelCase_) return 0 if __name__ == "__main__": raise SystemExit(main())
17
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
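# --- Added quick check ---
# F(12) = 144 is the first Fibonacci term with three digits, so:
assert solution(3) == 12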
94
0
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
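# --- Added usage sketch (illustrative) ---
# Splitting one textual-inversion concept across 4 embedding vectors;
# "openai/clip-vit-base-patch32" is just an assumed base tokenizer checkpoint.
tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
tokenizer.add_placeholder_tokens("<my-concept>", num_vec_per_token=4)
# "<my-concept>" is expanded to "<my-concept>_0 ... <my-concept>_3" before encoding:
ids = tokenizer("a photo of <my-concept>")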
18
from __future__ import annotations

import unittest

import numpy as np

from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.layoutlm.modeling_tf_layoutlm import (
        TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFLayoutLMForMaskedLM,
        TFLayoutLMForQuestionAnswering,
        TFLayoutLMForSequenceClassification,
        TFLayoutLMForTokenClassification,
        TFLayoutLMModel,
    )


class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass


def prepare_layoutlm_batch_inputs():
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels


@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
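# --- Added illustration: the LayoutLM bbox convention these tests rely on ---
# Each token gets a box [x0, y0, x1, y1] on a 0-1000 normalized page grid,
# which is why the tester above clamps coordinates so that x0 <= x1 and y0 <= y1.
# A sketch (hypothetical helper, not from the test file) for a 612x792 page:
def normalize_box(box, page_width, page_height):
    x0, y0, x1, y1 = box
    return [
        int(1000 * x0 / page_width),
        int(1000 * y0 / page_height),
        int(1000 * x1 / page_width),
        int(1000 * y1 / page_height),
    ]

assert normalize_box([61, 79, 122, 158], 612, 792) == [99, 99, 199, 199]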
94
0
def climb_stairs(number_of_steps: int) -> int:
    """
    Distinct ways to climb a staircase of `number_of_steps` steps,
    taking 1 or 2 steps at a time.

    >>> climb_stairs(3)
    3
    >>> climb_stairs(1)
    1
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
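# --- Added quick check ---
# The count follows the Fibonacci recurrence ways(n) = ways(n-1) + ways(n-2):
# a 4-step staircase has 5 climbs (1+1+1+1, 1+1+2, 1+2+1, 2+1+1, 2+2).
assert climb_stairs(4) == 5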
19
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")

    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
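# --- Added worked example ---
# The recurrence gcd(a, b) = gcd(b, a mod b) traced for (48, 18):
#   (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) -> 6
assert euclidean_gcd(48, 18) == euclidean_gcd_recursive(48, 18) == 6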
94
0