Columns:
code                       string   (lengths 87 to 55.2k)
code_codestyle             int64    (0 to 349)
style_context              string   (lengths 135 to 49.1k)
style_context_codestyle    int64    (0 to 349)
label                      int64    (0 to 1)
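The five columns above repeat for every row in the dump that follows, in the order code, code_codestyle, style_context, style_context_codestyle, label. As a minimal sketch of how such a split could be inspected, assuming it is distributed through the Hugging Face datasets library ("user/codestyle-pairs" is a hypothetical placeholder, not the actual dataset identifier):

from datasets import load_dataset

# Hypothetical dataset id; substitute the real one.
ds = load_dataset("user/codestyle-pairs", split="train")

row = ds[0]
print(row["code"][:200])               # obfuscated source string (87 to 55.2k chars)
print(row["code_codestyle"])           # int64 style id in [0, 349]
print(row["style_context"][:200])      # companion source string (135 to 49.1k chars)
print(row["style_context_codestyle"])  # int64 style id in [0, 349]
print(row["label"])                    # binary label, 0 or 1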
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: UpperCAmelCase_ : Any = None UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : List[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} UpperCAmelCase_ : List[Any] = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""", """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""", }, } UpperCAmelCase_ : List[Any] = { """facebook/mbart-large-en-ro""": 1024, """facebook/mbart-large-cc25""": 1024, } # fmt: off UpperCAmelCase_ : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = ["input_ids", "attention_mask"] __UpperCamelCase = MBartTokenizer __UpperCamelCase = [] __UpperCamelCase = [] def __init__( self : Any , lowercase_ : str=None , lowercase_ : int=None , lowercase_ : List[Any]="<s>" , lowercase_ : Any="</s>" , lowercase_ : Any="</s>" , lowercase_ : List[str]="<s>" , lowercase_ : List[Any]="<unk>" , lowercase_ : List[Any]="<pad>" , lowercase_ : List[str]="<mask>" , lowercase_ : Optional[int]=None , lowercase_ : int=None , lowercase_ : List[Any]=None , **lowercase_ : str , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_) if isinstance(lowercase_ , lowercase_) else mask_token super().__init__( vocab_file=lowercase_ , tokenizer_file=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : List[Any] = vocab_file SCREAMING_SNAKE_CASE_ : Any = False if not self.vocab_file else True SCREAMING_SNAKE_CASE_ : Dict = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens}) SCREAMING_SNAKE_CASE_ : Any = { lang_code: self.convert_tokens_to_ids(lowercase_) for lang_code in FAIRSEQ_LANGUAGE_CODES } SCREAMING_SNAKE_CASE_ : Optional[Any] = src_lang if src_lang is not None else '''en_XX''' SCREAMING_SNAKE_CASE_ : Any = self.convert_tokens_to_ids(self._src_lang) SCREAMING_SNAKE_CASE_ : int = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' return self._src_lang @src_lang.setter def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = [self.sep_token_id] SCREAMING_SNAKE_CASE_ : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[str] , lowercase_ : Optional[str] , **lowercase_ : List[Any]): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''') SCREAMING_SNAKE_CASE_ : List[str] = src_lang SCREAMING_SNAKE_CASE_ : Tuple = self(lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.convert_tokens_to_ids(lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tgt_lang_id return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[str] , lowercase_ : str = "en_XX" , lowercase_ : Optional[List[str]] = None , lowercase_ : str = "ro_RO" , **lowercase_ : Any , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = src_lang SCREAMING_SNAKE_CASE_ : Tuple = tgt_lang return super().prepare_seqaseq_batch(lowercase_ , lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.convert_tokens_to_ids(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = [] SCREAMING_SNAKE_CASE_ : Optional[int] = [self.eos_token_id, self.cur_lang_code] SCREAMING_SNAKE_CASE_ : List[str] = self.convert_ids_to_tokens(self.prefix_tokens) SCREAMING_SNAKE_CASE_ : str = self.convert_ids_to_tokens(self.suffix_tokens) SCREAMING_SNAKE_CASE_ : Optional[int] = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + 
suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.convert_tokens_to_ids(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = [] SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code] SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens) SCREAMING_SNAKE_CASE_ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens) SCREAMING_SNAKE_CASE_ : Dict = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : str , lowercase_ : Optional[str] = None): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''') if not os.path.isdir(lowercase_): logger.error(F'Vocabulary path ({save_directory}) should be a directory.') return SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join( lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_): copyfile(self.vocab_file , lowercase_) return (out_vocab_file,)
code_codestyle: 91
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Dict = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = [ 
'''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] SCREAMING_SNAKE_CASE_ : List[Any] = '''fp16''' self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] SCREAMING_SNAKE_CASE_ : List[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : str = '''fp16''' self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_))
style_context_codestyle: 91
label: 1
"""simple docstring""" from __future__ import annotations from random import choice def _A (__a ) -> Optional[int]: """simple docstring""" return choice(__a ) def _A (__a , __a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = random_pivot(__a ) # partition based on pivot # linear time SCREAMING_SNAKE_CASE_ : Any = [e for e in lst if e < pivot] SCREAMING_SNAKE_CASE_ : Tuple = [e for e in lst if e > pivot] # if we get lucky, pivot might be the element we want. # we can easily see this: # small (elements smaller than k) # + pivot (kth element) # + big (elements larger than k) if len(__a ) == k - 1: return pivot # pivot is in elements bigger than k elif len(__a ) < k - 1: return kth_number(__a , k - len(__a ) - 1 ) # pivot is in elements smaller than k else: return kth_number(__a , __a ) if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 91
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable UpperCAmelCase_ : str = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Dict = ["""GPTNeoXTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : List[str] = [ """GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoXForCausalLM""", """GPTNeoXForQuestionAnswering""", """GPTNeoXForSequenceClassification""", """GPTNeoXForTokenClassification""", """GPTNeoXLayer""", """GPTNeoXModel""", """GPTNeoXPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
style_context_codestyle: 91
label: 1
"""simple docstring""" from typing import Any class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , lowercase_ : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = data SCREAMING_SNAKE_CASE_ : int = None class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = None def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.head while temp is not None: print(temp.data , end=''' ''') SCREAMING_SNAKE_CASE_ : Optional[Any] = temp.next print() def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = Node(lowercase_) SCREAMING_SNAKE_CASE_ : int = self.head SCREAMING_SNAKE_CASE_ : List[str] = new_node def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple): '''simple docstring''' if node_data_a == node_data_a: return else: SCREAMING_SNAKE_CASE_ : List[Any] = self.head while node_a is not None and node_a.data != node_data_a: SCREAMING_SNAKE_CASE_ : List[Any] = node_a.next SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.head while node_a is not None and node_a.data != node_data_a: SCREAMING_SNAKE_CASE_ : Union[str, Any] = node_a.next if node_a is None or node_a is None: return SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = node_a.data, node_a.data if __name__ == "__main__": UpperCAmelCase_ : Optional[Any] = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print("""After swapping""") ll.print_list()
code_codestyle: 91
"""simple docstring""" import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py UpperCAmelCase_ : Optional[int] = """src/transformers""" UpperCAmelCase_ : Tuple = """docs/source/en""" UpperCAmelCase_ : Optional[Any] = """.""" def _A (__a , __a , __a ) -> Dict: """simple docstring""" with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Dict = f.readlines() # Find the start prompt. SCREAMING_SNAKE_CASE_ : List[Any] = 0 while not lines[start_index].startswith(__a ): start_index += 1 start_index += 1 SCREAMING_SNAKE_CASE_ : Tuple = start_index while not lines[end_index].startswith(__a ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | UpperCAmelCase_ : Optional[Any] = """Model|Encoder|Decoder|ForConditionalGeneration""" # Regexes that match TF/Flax/PT model names. UpperCAmelCase_ : int = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") UpperCAmelCase_ : Dict = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. UpperCAmelCase_ : int = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # This is to make sure the transformers module imported is the one in the repo. UpperCAmelCase_ : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH) def _A (__a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , __a ) return [m.group(0 ) for m in matches] def _A (__a , __a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = 2 if text == '''✅''' or text == '''❌''' else len(__a ) SCREAMING_SNAKE_CASE_ : Tuple = (width - text_length) // 2 SCREAMING_SNAKE_CASE_ : Tuple = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def _A () -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES SCREAMING_SNAKE_CASE_ : Tuple = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } SCREAMING_SNAKE_CASE_ : List[Any] = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. SCREAMING_SNAKE_CASE_ : List[str] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : int = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : List[str] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : int = collections.defaultdict(__a ) # Let's lookup through all transformers object (once). 
for attr_name in dir(__a ): SCREAMING_SNAKE_CASE_ : Any = None if attr_name.endswith('''Tokenizer''' ): SCREAMING_SNAKE_CASE_ : Dict = slow_tokenizers SCREAMING_SNAKE_CASE_ : Dict = attr_name[:-9] elif attr_name.endswith('''TokenizerFast''' ): SCREAMING_SNAKE_CASE_ : Optional[Any] = fast_tokenizers SCREAMING_SNAKE_CASE_ : Optional[Any] = attr_name[:-13] elif _re_tf_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : int = tf_models SCREAMING_SNAKE_CASE_ : Dict = _re_tf_models.match(__a ).groups()[0] elif _re_flax_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : Any = flax_models SCREAMING_SNAKE_CASE_ : Tuple = _re_flax_models.match(__a ).groups()[0] elif _re_pt_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : str = pt_models SCREAMING_SNAKE_CASE_ : int = _re_pt_models.match(__a ).groups()[0] if lookup_dict is not None: while len(__a ) > 0: if attr_name in model_name_to_prefix.values(): SCREAMING_SNAKE_CASE_ : List[str] = True break # Try again after removing the last word in the name SCREAMING_SNAKE_CASE_ : Optional[int] = ''''''.join(camel_case_split(__a )[:-1] ) # Let's build that table! SCREAMING_SNAKE_CASE_ : Any = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) SCREAMING_SNAKE_CASE_ : Any = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support'''] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). SCREAMING_SNAKE_CASE_ : List[str] = [len(__a ) + 2 for c in columns] SCREAMING_SNAKE_CASE_ : str = max([len(__a ) for name in model_names] ) + 2 # Build the table per se SCREAMING_SNAKE_CASE_ : List[Any] = '''|''' + '''|'''.join([_center_text(__a , __a ) for c, w in zip(__a , __a )] ) + '''|\n''' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n" SCREAMING_SNAKE_CASE_ : Union[str, Any] = {True: '''✅''', False: '''❌'''} for name in model_names: SCREAMING_SNAKE_CASE_ : str = model_name_to_prefix[name] SCREAMING_SNAKE_CASE_ : int = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(__a , __a ) for l, w in zip(__a , __a )] ) + "|\n" return table def _A (__a=False ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = _find_text_in_file( filename=os.path.join(__a , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , ) SCREAMING_SNAKE_CASE_ : Tuple = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(__a , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' ) if __name__ == "__main__": UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCAmelCase_ : Any = parser.parse_args() check_model_table(args.fix_and_overwrite)
style_context_codestyle: 91
label: 1
"""simple docstring""" import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : List[str]=13 , lowercase_ : Union[str, Any]=30 , lowercase_ : List[Any]=2 , lowercase_ : List[str]=3 , lowercase_ : Union[str, Any]=True , lowercase_ : int=True , lowercase_ : Tuple=32 , lowercase_ : Tuple=5 , lowercase_ : Union[str, Any]=4 , lowercase_ : Dict=37 , lowercase_ : Optional[int]="gelu" , lowercase_ : str=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : List[str]=10 , lowercase_ : Dict=0.02 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = parent SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_size SCREAMING_SNAKE_CASE_ : List[str] = image_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = patch_size SCREAMING_SNAKE_CASE_ : int = num_channels SCREAMING_SNAKE_CASE_ : Optional[int] = is_training SCREAMING_SNAKE_CASE_ : Any = use_labels SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size SCREAMING_SNAKE_CASE_ : int = num_hidden_layers SCREAMING_SNAKE_CASE_ : int = num_attention_heads SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size SCREAMING_SNAKE_CASE_ : Dict = hidden_act SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : int = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : str = type_sequence_label_size SCREAMING_SNAKE_CASE_ : Any = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE_ : Any = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE_ : Tuple = num_patches + 1 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_ : Union[str, Any] = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , ) return config, pixel_values def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxViTModel(config=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = model(lowercase_) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE_ : str = (self.image_size, self.image_size) SCREAMING_SNAKE_CASE_ : Tuple = (self.patch_size, self.patch_size) SCREAMING_SNAKE_CASE_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Any , lowercase_ : Optional[int]): '''simple docstring''' 
SCREAMING_SNAKE_CASE_ : Tuple = self.type_sequence_label_size SCREAMING_SNAKE_CASE_ : Dict = FlaxViTForImageClassification(config=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images SCREAMING_SNAKE_CASE_ : int = 1 SCREAMING_SNAKE_CASE_ : List[str] = FlaxViTForImageClassification(lowercase_) SCREAMING_SNAKE_CASE_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_ : Any = model(lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) : List[str] = config_and_inputs SCREAMING_SNAKE_CASE_ : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxViTModelTester(self) SCREAMING_SNAKE_CASE_ : int = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : str = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ : Tuple = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowercase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowercase_) @jax.jit def model_jitted(lowercase_ : int , **lowercase_ : Optional[Any]): return model(pixel_values=lowercase_ , **lowercase_) with self.subTest('''JIT Enabled'''): SCREAMING_SNAKE_CASE_ : Tuple = model_jitted(**lowercase_).to_tuple() with self.subTest('''JIT Disabled'''): with jax.disable_jit(): SCREAMING_SNAKE_CASE_ : Optional[int] = model_jitted(**lowercase_).to_tuple() self.assertEqual(len(lowercase_) , len(lowercase_)) for jitted_output, output in zip(lowercase_ , lowercase_): self.assertEqual(jitted_output.shape , output.shape) @slow def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' for model_class_name in self.all_model_classes: 
SCREAMING_SNAKE_CASE_ : List[Any] = model_class_name.from_pretrained('''google/vit-base-patch16-224''') SCREAMING_SNAKE_CASE_ : List[str] = model(np.ones((1, 3, 224, 224))) self.assertIsNotNone(lowercase_)
code_codestyle: 91
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , lowercase_ : List[Any] , lowercase_ : List[str]=13 , lowercase_ : int=7 , lowercase_ : Any=True , lowercase_ : str=True , lowercase_ : List[Any]=True , lowercase_ : List[Any]=True , lowercase_ : Dict=99 , lowercase_ : Union[str, Any]=24 , lowercase_ : int=2 , lowercase_ : List[str]=6 , lowercase_ : Any=37 , lowercase_ : Dict="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Dict=0.1 , lowercase_ : Union[str, Any]=512 , lowercase_ : List[str]=16 , lowercase_ : Any=2 , lowercase_ : Any=0.02 , lowercase_ : List[Any]=3 , lowercase_ : Optional[int]=None , lowercase_ : str=1000 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = parent SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_size SCREAMING_SNAKE_CASE_ : Optional[Any] = seq_length SCREAMING_SNAKE_CASE_ : List[Any] = is_training SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask SCREAMING_SNAKE_CASE_ : Optional[Any] = use_token_type_ids SCREAMING_SNAKE_CASE_ : int = use_labels SCREAMING_SNAKE_CASE_ : List[Any] = vocab_size SCREAMING_SNAKE_CASE_ : List[str] = hidden_size SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE_ : List[str] = num_attention_heads SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size SCREAMING_SNAKE_CASE_ : Tuple = hidden_act SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE_ : Union[str, Any] = type_vocab_size SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size SCREAMING_SNAKE_CASE_ : Any = initializer_range SCREAMING_SNAKE_CASE_ : Optional[Any] = num_labels SCREAMING_SNAKE_CASE_ : Tuple = scope SCREAMING_SNAKE_CASE_ : Optional[int] = range_bbox def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 3] SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 1] SCREAMING_SNAKE_CASE_ : str = t if bbox[i, j, 2] < bbox[i, j, 0]: SCREAMING_SNAKE_CASE_ : List[str] = bbox[i, j, 2] SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 0] SCREAMING_SNAKE_CASE_ : List[str] = t SCREAMING_SNAKE_CASE_ : Tuple = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) SCREAMING_SNAKE_CASE_ : Union[str, Any] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) SCREAMING_SNAKE_CASE_ : List[str] = None 
SCREAMING_SNAKE_CASE_ : List[str] = None if self.use_labels: SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE_ : Any = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = LiltModel(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , bbox=lowercase_ , token_type_ids=lowercase_) SCREAMING_SNAKE_CASE_ : int = model(lowercase_ , bbox=lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE_ : Optional[Any] = LiltForTokenClassification(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Tuple = model( lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : str , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = LiltForQuestionAnswering(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Optional[int] = model( lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) : List[str] = config_and_inputs 
SCREAMING_SNAKE_CASE_ : str = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __UpperCamelCase = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : str): '''simple docstring''' return True def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = LiltModelTester(self) SCREAMING_SNAKE_CASE_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE_ : Dict = type self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase_) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Optional[int] = LiltModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) @require_torch @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''').to(lowercase_) SCREAMING_SNAKE_CASE_ : str = torch.tensor([[1, 2]] , device=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase_) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Dict = model(input_ids=lowercase_ , bbox=lowercase_) SCREAMING_SNAKE_CASE_ : str = torch.Size([1, 2, 768]) SCREAMING_SNAKE_CASE_ : Dict = torch.tensor( [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=lowercase_ , ) self.assertTrue(outputs.last_hidden_state.shape , lowercase_) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase_ , atol=1e-3))
style_context_codestyle: 91
label: 1
"""simple docstring""" def _A (__a ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = 0 SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(__a ) for i in range(n - 1 ): for j in range(i + 1 , __a ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _A (__a ) -> int: """simple docstring""" if len(__a ) <= 1: return arr, 0 SCREAMING_SNAKE_CASE_ : int = len(__a ) // 2 SCREAMING_SNAKE_CASE_ : Tuple = arr[0:mid] SCREAMING_SNAKE_CASE_ : List[str] = arr[mid:] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = count_inversions_recursive(__a ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = count_inversions_recursive(__a ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = _count_cross_inversions(__a , __a ) SCREAMING_SNAKE_CASE_ : str = inversion_p + inversions_q + cross_inversions return c, num_inversions def _A (__a , __a ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = [] SCREAMING_SNAKE_CASE_ : Optional[int] = 0 while i < len(__a ) and j < len(__a ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(__a ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(__a ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _A () -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = [10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) SCREAMING_SNAKE_CASE_ : Dict = count_inversions_bf(__a ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = count_inversions_recursive(__a ) assert num_inversions_bf == num_inversions_recursive == 8 print('''number of inversions = ''' , __a ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() SCREAMING_SNAKE_CASE_ : Optional[int] = count_inversions_bf(__a ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = count_inversions_recursive(__a ) assert num_inversions_bf == num_inversions_recursive == 0 print('''number of inversions = ''' , __a ) # an empty list should also have zero inversions SCREAMING_SNAKE_CASE_ : int = [] SCREAMING_SNAKE_CASE_ : Tuple = count_inversions_bf(__a ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = count_inversions_recursive(__a ) assert num_inversions_bf == num_inversions_recursive == 0 print('''number of inversions = ''' , __a ) if __name__ == "__main__": main()
code_codestyle: 91
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) UpperCAmelCase_ : Dict = logging.getLogger(__name__) if __name__ == "__main__": UpperCAmelCase_ : List[str] = argparse.ArgumentParser( description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)""" ) parser.add_argument( """--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset.""" ) parser.add_argument( """--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file.""" ) parser.add_argument("""--vocab_size""", default=30522, type=int) UpperCAmelCase_ : Optional[Any] = parser.parse_args() logger.info(f'''Loading data from {args.data_file}''') with open(args.data_file, """rb""") as fp: UpperCAmelCase_ : Union[str, Any] = pickle.load(fp) logger.info("""Counting occurrences for MLM.""") UpperCAmelCase_ : Any = Counter() for tk_ids in data: counter.update(tk_ids) UpperCAmelCase_ : List[Any] = [0] * args.vocab_size for k, v in counter.items(): UpperCAmelCase_ : Dict = v logger.info(f'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, """wb""") as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
style_context_codestyle: 91
label: 1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase_ : List[str] = { """configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""], """tokenization_ctrl""": ["""CTRLTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Union[str, Any] = [ """CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""", """CTRLForSequenceClassification""", """CTRLLMHeadModel""", """CTRLModel""", """CTRLPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : List[Any] = [ """TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFCTRLForSequenceClassification""", """TFCTRLLMHeadModel""", """TFCTRLModel""", """TFCTRLPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys UpperCAmelCase_ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
code_codestyle: 91
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) def _A (__a , __a ) -> Tuple: """simple docstring""" try: with open(__a , '''rb''' ) as flax_state_f: SCREAMING_SNAKE_CASE_ : Optional[int] = from_bytes(__a , flax_state_f.read() ) except UnpicklingError as e: try: with open(__a ) as f: if f.read().startswith('''version''' ): raise OSError( '''You seem to have cloned a repository without having git-lfs installed. Please''' ''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the''' ''' folder you cloned.''' ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(__a , __a ) def _A (__a , __a ) -> Tuple: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights SCREAMING_SNAKE_CASE_ : Optional[int] = flatten_dict(jax.tree_util.tree_map(lambda __a : x.dtype == jnp.bfloataa , __a ) ).values() if any(__a ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = jax.tree_util.tree_map( lambda __a : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __a ) SCREAMING_SNAKE_CASE_ : int = '''''' SCREAMING_SNAKE_CASE_ : str = flatten_dict(__a , sep='''.''' ) SCREAMING_SNAKE_CASE_ : List[Any] = pt_model.state_dict() # keep track of unexpected & missing keys SCREAMING_SNAKE_CASE_ : str = [] SCREAMING_SNAKE_CASE_ : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): SCREAMING_SNAKE_CASE_ : Any = flax_key_tuple.split('''.''' ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: SCREAMING_SNAKE_CASE_ : Any = flax_key_tuple_array[:-1] + ['''weight'''] SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.transpose(__a , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": SCREAMING_SNAKE_CASE_ : Tuple = flax_key_tuple_array[:-1] + ['''weight'''] SCREAMING_SNAKE_CASE_ : Optional[int] = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": SCREAMING_SNAKE_CASE_ : Optional[int] = flax_key_tuple_array[:-1] + ['''weight'''] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(__a ): SCREAMING_SNAKE_CASE_ : List[str] = ( flax_key_tuple_string.replace('''_0''' , '''.0''' ) .replace('''_1''' , '''.1''' ) .replace('''_2''' , '''.2''' ) .replace('''_3''' , '''.3''' ) .replace('''_4''' , '''.4''' ) .replace('''_5''' , '''.5''' ) .replace('''_6''' , '''.6''' ) .replace('''_7''' , '''.7''' ) .replace('''_8''' , '''.8''' ) .replace('''_9''' , '''.9''' ) ) SCREAMING_SNAKE_CASE_ : Optional[Any] = '''.'''.join(__a ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f'Flax checkpoint seems to be 
incorrect. Weight {flax_key_tuple} was expected ' f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict SCREAMING_SNAKE_CASE_ : Optional[int] = np.asarray(__a ) if not isinstance(__a , np.ndarray ) else flax_tensor SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.from_numpy(__a ) # remove from missing keys missing_keys.remove(__a ) else: # weight is not expected by PyTorch model unexpected_keys.append(__a ) pt_model.load_state_dict(__a ) # re-transform missing_keys to list SCREAMING_SNAKE_CASE_ : int = list(__a ) if len(__a ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) if len(__a ) > 0: logger.warning( f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) return pt_model
style_context_codestyle: 91
label: 1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : List[str] = logging.get_logger(__name__) UpperCAmelCase_ : Union[str, Any] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} UpperCAmelCase_ : Optional[Any] = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", }, """tokenizer_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""", }, } UpperCAmelCase_ : Any = { """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } UpperCAmelCase_ : Union[str, Any] = """▁""" class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = AlbertTokenizer def __init__( self : List[Any] , lowercase_ : List[Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : Dict=True , lowercase_ : Optional[Any]=True , lowercase_ : Optional[int]=False , lowercase_ : List[str]="[CLS]" , lowercase_ : str="[SEP]" , lowercase_ : Dict="<unk>" , lowercase_ : int="[SEP]" , lowercase_ : List[str]="<pad>" , lowercase_ : Optional[Any]="[CLS]" , lowercase_ : Optional[int]="[MASK]" , **lowercase_ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = ( AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ , normalized=lowercase_) if isinstance(lowercase_ , lowercase_) else mask_token ) super().__init__( lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , 
bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : Optional[int] = do_lower_case SCREAMING_SNAKE_CASE_ : Optional[Any] = remove_space SCREAMING_SNAKE_CASE_ : List[str] = keep_accents SCREAMING_SNAKE_CASE_ : List[Any] = vocab_file SCREAMING_SNAKE_CASE_ : List[str] = False if not self.vocab_file else True def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = [self.sep_token_id] SCREAMING_SNAKE_CASE_ : str = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE_ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''') if not os.path.isdir(lowercase_): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join( lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_): copyfile(self.vocab_file , lowercase_) return (out_vocab_file,)
91
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase_ : Any = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "openai-gpt" __UpperCamelCase = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : List[str] , lowercase_ : List[str]=40478 , lowercase_ : List[str]=512 , lowercase_ : Optional[Any]=768 , lowercase_ : Tuple=12 , lowercase_ : Tuple=12 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : List[Any]=1e-5 , lowercase_ : int=0.02 , lowercase_ : Optional[int]="cls_index" , lowercase_ : Any=True , lowercase_ : List[Any]=None , lowercase_ : List[str]=True , lowercase_ : Optional[Any]=0.1 , **lowercase_ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = vocab_size SCREAMING_SNAKE_CASE_ : Tuple = n_positions SCREAMING_SNAKE_CASE_ : Optional[int] = n_embd SCREAMING_SNAKE_CASE_ : Dict = n_layer SCREAMING_SNAKE_CASE_ : Any = n_head SCREAMING_SNAKE_CASE_ : Union[str, Any] = afn SCREAMING_SNAKE_CASE_ : int = resid_pdrop SCREAMING_SNAKE_CASE_ : List[str] = embd_pdrop SCREAMING_SNAKE_CASE_ : Union[str, Any] = attn_pdrop SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_epsilon SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range SCREAMING_SNAKE_CASE_ : List[str] = summary_type SCREAMING_SNAKE_CASE_ : Tuple = summary_use_proj SCREAMING_SNAKE_CASE_ : Union[str, Any] = summary_activation SCREAMING_SNAKE_CASE_ : Any = summary_first_dropout SCREAMING_SNAKE_CASE_ : List[str] = summary_proj_to_labels super().__init__(**lowercase_)
91
1
"""simple docstring""" import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def _A (__a , __a , __a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = ( ('''layer.''', '''layer_'''), ('''word_embeddings.weight''', '''word_embeddings'''), ('''position_embeddings.weight''', '''position_embeddings'''), ('''token_type_embeddings.weight''', '''token_type_embeddings'''), ('''.''', '''/'''), ('''LayerNorm/weight''', '''LayerNorm/gamma'''), ('''LayerNorm/bias''', '''LayerNorm/beta'''), ('''weight''', '''kernel'''), ) if not os.path.isdir(__a ): os.makedirs(__a ) SCREAMING_SNAKE_CASE_ : Dict = model.state_dict() def to_tf_var_name(__a ): for patt, repl in iter(__a ): SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace(__a , __a ) return f'bert/{name}' def create_tf_var(__a , __a , __a ): SCREAMING_SNAKE_CASE_ : List[str] = tf.dtypes.as_dtype(tensor.dtype ) SCREAMING_SNAKE_CASE_ : Optional[int] = tf.get_variable(dtype=__a , shape=tensor.shape , name=__a , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__a ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: SCREAMING_SNAKE_CASE_ : Any = to_tf_var_name(__a ) SCREAMING_SNAKE_CASE_ : Any = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): SCREAMING_SNAKE_CASE_ : List[str] = torch_tensor.T SCREAMING_SNAKE_CASE_ : List[Any] = create_tf_var(tensor=__a , name=__a , session=__a ) tf.keras.backend.set_value(__a , __a ) SCREAMING_SNAKE_CASE_ : Optional[int] = session.run(__a ) print(f'Successfully created {tf_name}: {np.allclose(__a , __a )}' ) SCREAMING_SNAKE_CASE_ : str = tf.train.Saver(tf.trainable_variables() ) saver.save(__a , os.path.join(__a , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) ) def _A (__a=None ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = argparse.ArgumentParser() parser.add_argument('''--model_name''' , type=__a , required=__a , help='''model name e.g. bert-base-uncased''' ) parser.add_argument( '''--cache_dir''' , type=__a , default=__a , required=__a , help='''Directory containing pytorch model''' ) parser.add_argument('''--pytorch_model_path''' , type=__a , required=__a , help='''/path/to/<pytorch-model-name>.bin''' ) parser.add_argument('''--tf_cache_dir''' , type=__a , required=__a , help='''Directory in which to save tensorflow model''' ) SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args(__a ) SCREAMING_SNAKE_CASE_ : Dict = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__a , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
91
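The conversion script above renames PyTorch parameters to TensorFlow variable names by applying an ordered list of string substitutions and prefixing "bert/". That mapping can be exercised on its own; this sketch copies the substitution pairs from the record, and the sample parameter name is only an example:

VAR_MAP = (
    ("layer.", "layer_"),
    ("word_embeddings.weight", "word_embeddings"),
    ("position_embeddings.weight", "position_embeddings"),
    ("token_type_embeddings.weight", "token_type_embeddings"),
    (".", "/"),
    ("LayerNorm/weight", "LayerNorm/gamma"),
    ("LayerNorm/bias", "LayerNorm/beta"),
    ("weight", "kernel"),
)

def to_tf_var_name(name: str) -> str:
    # substitutions run in order, so "." -> "/" happens before "weight" -> "kernel"
    for patt, repl in VAR_MAP:
        name = name.replace(patt, repl)
    return f"bert/{name}"

print(to_tf_var_name("encoder.layer.0.attention.self.query.weight"))
# -> bert/encoder/layer_0/attention/self/query/kernel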
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[str] , *lowercase_ : Dict , **lowercase_ : Union[str, Any]): '''simple docstring''' warnings.warn( '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DeiTImageProcessor instead.''' , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_)
91
1
"""simple docstring""" import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = TextToVideoSDPipeline __UpperCamelCase = TEXT_TO_IMAGE_PARAMS __UpperCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __UpperCamelCase = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , ) SCREAMING_SNAKE_CASE_ : Optional[Any] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , ) SCREAMING_SNAKE_CASE_ : List[Any] = CLIPTextModel(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''') SCREAMING_SNAKE_CASE_ : str = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[Any] , lowercase_ : Optional[int]=0): '''simple docstring''' if str(lowercase_).startswith('''mps'''): SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.manual_seed(lowercase_) else: SCREAMING_SNAKE_CASE_ : int = torch.Generator(device=lowercase_).manual_seed(lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''pt''', } return inputs def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_components() SCREAMING_SNAKE_CASE_ : List[Any] = TextToVideoSDPipeline(**lowercase_) 
SCREAMING_SNAKE_CASE_ : Optional[Any] = sd_pipe.to(lowercase_) sd_pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_dummy_inputs(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = '''np''' SCREAMING_SNAKE_CASE_ : List[Any] = sd_pipe(**lowercase_).frames SCREAMING_SNAKE_CASE_ : Union[str, Any] = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) SCREAMING_SNAKE_CASE_ : List[str] = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase_ , expected_max_diff=3e-3) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase_ , expected_max_diff=1e-2) @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''') def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' pass @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''') def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' pass @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''') def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''') SCREAMING_SNAKE_CASE_ : Optional[int] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''') SCREAMING_SNAKE_CASE_ : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) SCREAMING_SNAKE_CASE_ : Tuple = pipe.to('''cuda''') SCREAMING_SNAKE_CASE_ : List[str] = '''Spiderman is surfing''' SCREAMING_SNAKE_CASE_ : Any = torch.Generator(device='''cpu''').manual_seed(0) SCREAMING_SNAKE_CASE_ : str = pipe(lowercase_ , generator=lowercase_ , num_inference_steps=25 , output_type='''pt''').frames SCREAMING_SNAKE_CASE_ : Optional[Any] = video_frames.cpu().numpy() assert np.abs(expected_video - video).mean() < 5e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''') SCREAMING_SNAKE_CASE_ : List[str] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''') SCREAMING_SNAKE_CASE_ : Tuple = pipe.to('''cuda''') SCREAMING_SNAKE_CASE_ : Tuple = '''Spiderman is surfing''' SCREAMING_SNAKE_CASE_ : int = torch.Generator(device='''cpu''').manual_seed(0) SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='''pt''').frames SCREAMING_SNAKE_CASE_ : List[Any] = video_frames.cpu().numpy() assert np.abs(expected_video - video).mean() < 5e-2
91
"""simple docstring""" import random from typing import Any def _A (__a ) -> list[Any]: """simple docstring""" for _ in range(len(__a ) ): SCREAMING_SNAKE_CASE_ : Optional[int] = random.randint(0 , len(__a ) - 1 ) SCREAMING_SNAKE_CASE_ : Tuple = random.randint(0 , len(__a ) - 1 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = data[b], data[a] return data if __name__ == "__main__": UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5, 6, 7] UpperCAmelCase_ : Dict = ["""python""", """says""", """hello""", """!"""] print("""Fisher-Yates Shuffle:""") print("""List""", integers, strings) print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
91
1
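The record above is named fisher_yates_shuffle but, on each pass, swaps two independently chosen random positions rather than walking the list. For comparison, a textbook Fisher-Yates shuffle (a sketch written for this note, not taken from the record) iterates from the last index down and swaps each element with a random earlier-or-equal index, which makes every permutation equally likely:

import random
from typing import Any

def fisher_yates_shuffle_reference(data: list[Any]) -> list[Any]:
    # classic in-place Fisher-Yates / Knuth shuffle
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # 0 <= j <= i, inclusive
        data[i], data[j] = data[j], data[i]
    return data

print(fisher_yates_shuffle_reference([0, 1, 2, 3, 4, 5, 6, 7]))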
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , lowercase_ : Dict , lowercase_ : Union[str, Any]=sys.maxsize): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = '''bilinear''' SCREAMING_SNAKE_CASE_ : Optional[int] = max_size SCREAMING_SNAKE_CASE_ : Any = short_edge_length def __call__( self : str , lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = [] for img in imgs: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = img.shape[:2] # later: provide list and randomly choose index for resize SCREAMING_SNAKE_CASE_ : Any = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1) if size == 0: return img SCREAMING_SNAKE_CASE_ : Dict = size * 1.0 / min(lowercase_ , lowercase_) if h < w: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = size, scale * w else: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = scale * h, size if max(lowercase_ , lowercase_) > self.max_size: SCREAMING_SNAKE_CASE_ : List[str] = self.max_size * 1.0 / max(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Any = newh * scale SCREAMING_SNAKE_CASE_ : str = neww * scale SCREAMING_SNAKE_CASE_ : int = int(neww + 0.5) SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(newh + 0.5) if img.dtype == np.uinta: SCREAMING_SNAKE_CASE_ : Tuple = Image.fromarray(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR) SCREAMING_SNAKE_CASE_ : int = np.asarray(lowercase_) else: SCREAMING_SNAKE_CASE_ : int = img.permute(2 , 0 , 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw SCREAMING_SNAKE_CASE_ : int = nn.functional.interpolate( lowercase_ , (newh, neww) , mode=self.interp_method , align_corners=lowercase_).squeeze(0) img_augs.append(lowercase_) return img_augs class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Tuple , lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST) SCREAMING_SNAKE_CASE_ : Any = cfg.INPUT.FORMAT SCREAMING_SNAKE_CASE_ : List[str] = cfg.SIZE_DIVISIBILITY SCREAMING_SNAKE_CASE_ : List[str] = cfg.PAD_VALUE SCREAMING_SNAKE_CASE_ : List[Any] = cfg.INPUT.MAX_SIZE_TEST SCREAMING_SNAKE_CASE_ : List[Any] = cfg.MODEL.DEVICE SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) SCREAMING_SNAKE_CASE_ : Dict = lambda lowercase_: (x - self.pixel_mean) / self.pixel_std def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = tuple(max(lowercase_) for s in zip(*[img.shape for img in images])) SCREAMING_SNAKE_CASE_ : List[str] = [im.shape[-2:] for im in images] SCREAMING_SNAKE_CASE_ : int = [ nn.functional.pad( lowercase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(lowercase_ , lowercase_) ] return torch.stack(lowercase_), torch.tensor(lowercase_) def __call__( self : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any]=False): '''simple docstring''' with torch.no_grad(): if not isinstance(lowercase_ , lowercase_): 
SCREAMING_SNAKE_CASE_ : Optional[Any] = [images] if single_image: assert len(lowercase_) == 1 for i in range(len(lowercase_)): if isinstance(images[i] , torch.Tensor): images.insert(lowercase_ , images.pop(lowercase_).to(self.device).float()) elif not isinstance(images[i] , torch.Tensor): images.insert( lowercase_ , torch.as_tensor(img_tensorize(images.pop(lowercase_) , input_format=self.input_format)) .to(self.device) .float() , ) # resize smallest edge SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([im.shape[:2] for im in images]) SCREAMING_SNAKE_CASE_ : Optional[int] = self.aug(lowercase_) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.normalizer(lowercase_) for x in images] # now pad them to do the following operations SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.pad(lowercase_) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad SCREAMING_SNAKE_CASE_ : Optional[int] = torch.true_divide(lowercase_ , lowercase_) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _A (__a , __a ) -> Dict: """simple docstring""" boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _A (__a , __a ) -> Any: """simple docstring""" assert torch.isfinite(__a ).all(), "Box tensor contains infinite or NaN!" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = box_size tensor[:, 0].clamp_(min=0 , max=__a ) tensor[:, 1].clamp_(min=0 , max=__a ) tensor[:, 2].clamp_(min=0 , max=__a ) tensor[:, 3].clamp_(min=0 , max=__a )
91
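The ResizeShortestEdge helper in the record above scales an image so its shorter side reaches a target length, then rescales again if the longer side would exceed max_size, rounding both dimensions at the end. The size arithmetic in isolation looks roughly like this; the sample numbers are only illustrative (the record actually draws the target size at random from a range):

def shortest_edge_target(h: int, w: int, size: int, max_size: int) -> tuple[int, int]:
    scale = size / min(h, w)                      # bring the shorter side to `size`
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:                # cap the longer side at `max_size`
        shrink = max_size / max(newh, neww)
        newh, neww = newh * shrink, neww * shrink
    return int(newh + 0.5), int(neww + 0.5)       # round half up, as in the record

print(shortest_edge_target(480, 640, 800, 1333))  # -> (800, 1067)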
"""simple docstring""" import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def _A (__a , __a , __a ) -> Dict: """simple docstring""" if gpta_config_file == "": SCREAMING_SNAKE_CASE_ : Optional[Any] = GPTaConfig() else: SCREAMING_SNAKE_CASE_ : Tuple = GPTaConfig.from_json_file(__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = GPTaModel(__a ) # Load weights from numpy load_tf_weights_in_gpta(__a , __a , __a ) # Save pytorch-model SCREAMING_SNAKE_CASE_ : List[str] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME SCREAMING_SNAKE_CASE_ : List[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(f'Save PyTorch model to {pytorch_weights_dump_path}' ) torch.save(model.state_dict() , __a ) print(f'Save configuration file to {pytorch_config_dump_path}' ) with open(__a , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCAmelCase_ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--gpt2_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained OpenAI model. \n""" """This specifies the model architecture.""" ), ) UpperCAmelCase_ : Union[str, Any] = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
91
1
"""simple docstring""" import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor UpperCAmelCase_ : Optional[Any] = logging.getLogger(__name__) UpperCAmelCase_ : Any = 50 # max width of layer names UpperCAmelCase_ : List[Any] = 70 # max width of quantizer names def _A (__a ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = parser.add_argument_group('''quant_trainer arguments''' ) group.add_argument('''--wprec''' , type=__a , default=8 , help='''weight precision''' ) group.add_argument('''--aprec''' , type=__a , default=8 , help='''activation precision''' ) group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' ) group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' ) group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' ) group.add_argument('''--quant-disable-keyword''' , type=__a , nargs='''+''' , help='''disable quantizers by keyword''' ) group.add_argument('''--quant-disable-layer-module''' , type=__a , help='''disable quantizers by keyword under layer.''' ) group.add_argument('''--quant-enable-layer-module''' , type=__a , help='''enable quantizers by keyword under layer''' ) group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' ) group.add_argument('''--percentile''' , default=__a , type=__a , help='''percentile for PercentileCalibrator''' ) group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' ) group.add_argument('''--clip-gelu''' , metavar='''N''' , type=__a , help='''clip gelu output maximum value to N''' ) group.add_argument( '''--recalibrate-weights''' , action='''store_true''' , help=( '''recalibrate weight amaxes by taking the max of the weights.''' ''' amaxes will be computed with the current quantization granularity (axis).''' ) , ) def _A (__a ) -> List[str]: """simple docstring""" if args.calibrator == "max": SCREAMING_SNAKE_CASE_ : Optional[int] = '''max''' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('''Specify --percentile when using percentile calibrator''' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''histogram''' elif args.calibrator == "mse": SCREAMING_SNAKE_CASE_ : List[str] = '''histogram''' else: raise ValueError(f'Invalid calibrator {args.calibrator}' ) SCREAMING_SNAKE_CASE_ : List[str] = QuantDescriptor(num_bits=args.aprec , calib_method=__a ) SCREAMING_SNAKE_CASE_ : Dict = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(__a ) quant_nn.QuantLinear.set_default_quant_desc_weight(__a ) def _A (__a , __a , __a=False , __a=False ) -> Optional[Any]: """simple docstring""" logger.info('''Configuring Model for Quantization''' ) logger.info(f'using quantization package {pytorch_quantization.__file__}' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(__a , ['''embeddings'''] , which='''weight''' , _disabled=__a ) if args.quant_disable: set_quantizer_by_name(__a , [''''''] , _disabled=__a ) if args.quant_disable_keyword: set_quantizer_by_name(__a , args.quant_disable_keyword , _disabled=__a ) if args.quant_disable_layer_module: set_quantizer_by_name(__a , 
[R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=__a ) if args.quant_enable_layer_module: set_quantizer_by_name(__a , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=__a ) if args.recalibrate_weights: recalibrate_weights(__a ) if args.fuse_qkv: fuse_qkv(__a , __a ) if args.clip_gelu: clip_gelu(__a , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(__a ) def _A (__a ) -> int: """simple docstring""" logger.info('''Enabling Calibration''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f'{name:80}: {module}' ) def _A (__a , __a ) -> Optional[int]: """simple docstring""" logger.info('''Loading calibrated amax''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('''percentile''' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(__a ) def _A (__a , __a ) -> Optional[int]: """simple docstring""" def fusea(__a , __a , __a ): for mod in [qq, qk, qv]: if not hasattr(__a , '''_amax''' ): print(''' WARNING: NO AMAX BUFFER''' ) return SCREAMING_SNAKE_CASE_ : List[Any] = qq._amax.detach().item() SCREAMING_SNAKE_CASE_ : Dict = qk._amax.detach().item() SCREAMING_SNAKE_CASE_ : List[str] = qv._amax.detach().item() SCREAMING_SNAKE_CASE_ : Union[str, Any] = max(__a , __a , __a ) qq._amax.fill_(__a ) qk._amax.fill_(__a ) qv._amax.fill_(__a ) logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' ) for name, mod in model.named_modules(): if name.endswith('''.attention.self''' ): logger.info(f'FUSE_QKV: {name:{name_width}}' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _A (__a , __a ) -> List[str]: """simple docstring""" for name, mod in model.named_modules(): if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ): SCREAMING_SNAKE_CASE_ : Optional[Any] = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = mod._input_quantizer._amax.data.detach().item() logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' ) def _A (__a ) -> Dict: """simple docstring""" for name, mod in model.named_modules(): if hasattr(__a , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None: SCREAMING_SNAKE_CASE_ : Optional[Any] = mod.weight.shape[0] SCREAMING_SNAKE_CASE_ : Tuple = mod._weight_quantizer._amax.detach() SCREAMING_SNAKE_CASE_ : str = torch.ones(__a , dtype=amax.dtype , device=amax.device ) * amax print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' ) def _A (__a ) -> List[Any]: """simple docstring""" for name, mod in model.named_modules(): if hasattr(__a , '''_weight_quantizer''' ): if not hasattr(mod.weight_quantizer , '''_amax''' ): print('''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' ) continue # determine which axes to reduce across # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) SCREAMING_SNAKE_CASE_ : int = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) SCREAMING_SNAKE_CASE_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set SCREAMING_SNAKE_CASE_ : Any = pytorch_quantization.utils.reduce_amax(mod.weight , axis=__a , keepdims=__a ).detach() logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' ) SCREAMING_SNAKE_CASE_ : Dict = amax def _A (__a , __a=25 , __a=1_80 , __a=None ) -> List[str]: """simple docstring""" if ignore is None: SCREAMING_SNAKE_CASE_ : Union[str, Any] = [] elif not isinstance(__a , __a ): SCREAMING_SNAKE_CASE_ : int = [ignore] SCREAMING_SNAKE_CASE_ : List[Any] = 0 for name, mod in model.named_modules(): if not hasattr(__a , '''weight''' ): continue SCREAMING_SNAKE_CASE_ : List[str] = max(__a , len(__a ) ) for name, mod in model.named_modules(): SCREAMING_SNAKE_CASE_ : int = getattr(__a , '''_input_quantizer''' , __a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(__a , '''_weight_quantizer''' , __a ) if not hasattr(__a , '''weight''' ): continue if type(__a ) in ignore: continue if [True for s in ignore if type(__a ) is str and s in name]: continue SCREAMING_SNAKE_CASE_ : List[str] = f'Act:{input_q.extra_repr()}' SCREAMING_SNAKE_CASE_ : Tuple = f'Wgt:{weight_q.extra_repr()}' SCREAMING_SNAKE_CASE_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}' if len(__a ) <= line_width: logger.info(__a ) else: logger.info(f'{name:{name_width}} {act_str}' ) logger.info(f'{" ":{name_width}} {wgt_str}' ) def _A (__a ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = 0 for name, mod in model.named_modules(): if isinstance(__a , pytorch_quantization.nn.TensorQuantizer ): print(f'{name:80} {mod}' ) count += 1 print(f'{count} TensorQuantizers found in model' ) def _A (__a , __a , __a , __a , __a ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = getattr(__a , __a , __a ) if quantizer_mod is not None: assert hasattr(__a , __a ) setattr(__a , __a , __a ) else: logger.warning(f'{name} has no {quantizer}' ) def _A (__a , __a , __a="both" , **__a ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = f'Warning: changing {which} quantizers of {name:{qname_width}}' for k, v in kwargs.items(): s += f' {k}={v}' if which in ["input", "both"]: set_quantizer(__a , __a , '''_input_quantizer''' , __a , __a ) if which in ["weight", "both"]: set_quantizer(__a , __a , '''_weight_quantizer''' , __a , __a ) logger.info(__a ) def _A (__a , __a , **__a ) -> List[str]: """simple docstring""" for name, mod in model.named_modules(): if hasattr(__a , '''_input_quantizer''' ) or hasattr(__a , '''_weight_quantizer''' ): for n in names: if re.search(__a , __a ): set_quantizers(__a , __a , **__a ) elif name.endswith('''_quantizer''' ): for n in names: if re.search(__a , __a ): SCREAMING_SNAKE_CASE_ : Dict = f'Warning: changing {name:{name_width}}' for k, v in kwargs.items(): s += f' {k}={v}' setattr(__a , __a , __a ) logger.info(__a )
91
"""simple docstring""" from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
91
1
"""simple docstring""" import sys UpperCAmelCase_ : Any = ( """73167176531330624919225119674426574742355349194934""" """96983520312774506326239578318016984801869478851843""" """85861560789112949495459501737958331952853208805511""" """12540698747158523863050715693290963295227443043557""" """66896648950445244523161731856403098711121722383113""" """62229893423380308135336276614282806444486645238749""" """30358907296290491560440772390713810515859307960866""" """70172427121883998797908792274921901699720888093776""" """65727333001053367881220235421809751254540594752243""" """52584907711670556013604839586446706324415722155397""" """53697817977846174064955149290862569321978468622482""" """83972241375657056057490261407972968652414535100474""" """82166370484403199890008895243450658541227588666881""" """16427171479924442928230863465674813919123162824586""" """17866458359124566529476545682848912883142607690042""" """24219022671055626321111109370544217506941658960408""" """07198403850962455444362981230987879927244284909188""" """84580156166097919133875499200524063689912560717606""" """05886116467109405077541002256983155200055935729725""" """71636269561882670428252483600823257530420752963450""" ) def _A (__a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = 1 for digit in s: product *= int(__a ) return product def _A (__a = N ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = -sys.maxsize - 1 SCREAMING_SNAKE_CASE_ : str = n[:13] SCREAMING_SNAKE_CASE_ : Union[str, Any] = 13 while cur_index < len(__a ) - 13: if int(n[cur_index] ) >= int(substr[0] ): SCREAMING_SNAKE_CASE_ : Optional[int] = substr[1:] + n[cur_index] cur_index += 1 else: SCREAMING_SNAKE_CASE_ : int = max(__a , str_eval(__a ) ) SCREAMING_SNAKE_CASE_ : Optional[Any] = n[cur_index : cur_index + 13] cur_index += 13 return largest_product if __name__ == "__main__": print(f'''{solution() = }''')
91
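The record above tackles the "largest product of 13 adjacent digits" problem with a rolling substring and an early-skip heuristic. A more exhaustive alternative simply takes the product of every window; this sketch is independent of the record, and the short input is just the first 25 digits of the constant defined there:

def largest_window_product(digits: str, width: int = 13) -> int:
    best = 0
    for start in range(len(digits) - width + 1):
        product = 1
        for ch in digits[start : start + width]:
            product *= int(ch)
        best = max(best, product)
    return best

print(largest_window_product("7316717653133062491922511", 4))  # -> 630 (7*6*5*3)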
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL UpperCAmelCase_ : int = logging.get_logger(__name__) def _A (__a ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(__a , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__a , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__a ): return [[videos]] raise ValueError(f'Could not make batched video from {videos}' ) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["pixel_values"] def __init__( self : Dict , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : Dict , ): '''simple docstring''' super().__init__(**lowercase_) SCREAMING_SNAKE_CASE_ : str = size if size is not None else {'''shortest_edge''': 256} SCREAMING_SNAKE_CASE_ : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(lowercase_ , param_name='''crop_size''') SCREAMING_SNAKE_CASE_ : Optional[int] = do_resize SCREAMING_SNAKE_CASE_ : List[Any] = size SCREAMING_SNAKE_CASE_ : Tuple = do_center_crop SCREAMING_SNAKE_CASE_ : Dict = crop_size SCREAMING_SNAKE_CASE_ : List[Any] = resample SCREAMING_SNAKE_CASE_ : List[str] = do_rescale SCREAMING_SNAKE_CASE_ : List[str] = rescale_factor SCREAMING_SNAKE_CASE_ : List[Any] = offset SCREAMING_SNAKE_CASE_ : List[Any] = do_normalize SCREAMING_SNAKE_CASE_ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN SCREAMING_SNAKE_CASE_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_) if "shortest_edge" in size: SCREAMING_SNAKE_CASE_ : List[Any] = get_resize_output_image_size(lowercase_ , size['''shortest_edge'''] , default_to_square=lowercase_) elif "height" in size and "width" in size: SCREAMING_SNAKE_CASE_ : Union[str, Any] = (size['''height'''], size['''width''']) else: raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}') return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = get_size_dict(lowercase_) if "height" not in size or "width" not in size: raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}') return center_crop(lowercase_ , size=(size['''height'''], size['''width''']) , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : bool = True , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[int] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = image.astype(np.floataa) if offset: SCREAMING_SNAKE_CASE_ : Tuple = image - (scale / 2) return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[str] , ): '''simple docstring''' return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ): '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''') if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''') if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''') # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE_ : List[str] = to_numpy_array(lowercase_) if do_resize: SCREAMING_SNAKE_CASE_ : List[Any] = self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) if do_center_crop: SCREAMING_SNAKE_CASE_ : Dict = self.center_crop(lowercase_ , size=lowercase_) if do_rescale: SCREAMING_SNAKE_CASE_ : int = self.rescale(image=lowercase_ , scale=lowercase_ , offset=lowercase_) if do_normalize: SCREAMING_SNAKE_CASE_ : Dict = self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = to_channel_dimension_format(lowercase_ , lowercase_) return image def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Optional[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE_ : int = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE_ : Dict = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE_ : Dict = offset if offset is not None else self.offset SCREAMING_SNAKE_CASE_ : str = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE_ : Dict = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE_ : List[str] = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else self.size SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : Any = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(lowercase_ , param_name='''crop_size''') if not valid_images(lowercase_): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') SCREAMING_SNAKE_CASE_ : Tuple = make_batched(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = [ [ self._preprocess_image( image=lowercase_ , do_resize=lowercase_ , size=lowercase_ , resample=lowercase_ , do_center_crop=lowercase_ , crop_size=lowercase_ , do_rescale=lowercase_ , rescale_factor=lowercase_ , offset=lowercase_ , do_normalize=lowercase_ , image_mean=lowercase_ , image_std=lowercase_ , data_format=lowercase_ , ) for img in video ] for video in videos ] SCREAMING_SNAKE_CASE_ : int = {'''pixel_values''': videos} return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
91
1
"""simple docstring""" import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ : Any = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = """▁""" UpperCAmelCase_ : Tuple = { """vocab_file""": """vocab.json""", """spm_file""": """sentencepiece.bpe.model""", """tokenizer_config_file""": """tokenizer_config.json""", } UpperCAmelCase_ : Optional[Any] = { """vocab_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""", }, """spm_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""", }, """tokenizer_config_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""", }, } UpperCAmelCase_ : Tuple = { """facebook/m2m100_418M""": 1024, } # fmt: off UpperCAmelCase_ : Optional[Any] = { """m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""], """wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""] } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = ["input_ids", "attention_mask"] __UpperCamelCase = [] __UpperCamelCase = [] def __init__( self : List[str] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[int]=None , lowercase_ : Optional[int]=None , lowercase_ : Union[str, Any]="<s>" , lowercase_ : Optional[int]="</s>" , lowercase_ : Any="</s>" , lowercase_ : Optional[int]="<pad>" , lowercase_ : List[str]="<unk>" , lowercase_ : List[Any]="m2m100" , lowercase_ : Optional[Dict[str, Any]] = None , lowercase_ : Any=8 , **lowercase_ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = {} if sp_model_kwargs is None else sp_model_kwargs SCREAMING_SNAKE_CASE_ : Optional[Any] = language_codes SCREAMING_SNAKE_CASE_ : int = FAIRSEQ_LANGUAGE_CODES[language_codes] SCREAMING_SNAKE_CASE_ : 
List[str] = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code} SCREAMING_SNAKE_CASE_ : Tuple = kwargs.get('''additional_special_tokens''' , []) kwargs["additional_special_tokens"] += [ self.get_lang_token(lowercase_) for lang_code in fairseq_language_code if self.get_lang_token(lowercase_) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=lowercase_ , tgt_lang=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , language_codes=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=lowercase_ , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : Optional[Any] = vocab_file SCREAMING_SNAKE_CASE_ : List[Any] = load_json(lowercase_) SCREAMING_SNAKE_CASE_ : Dict = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE_ : int = spm_file SCREAMING_SNAKE_CASE_ : Union[str, Any] = load_spm(lowercase_ , self.sp_model_kwargs) SCREAMING_SNAKE_CASE_ : Dict = len(self.encoder) SCREAMING_SNAKE_CASE_ : List[str] = { self.get_lang_token(lowercase_): self.encoder_size + i for i, lang_code in enumerate(lowercase_) } SCREAMING_SNAKE_CASE_ : Union[str, Any] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(lowercase_)} SCREAMING_SNAKE_CASE_ : str = {v: k for k, v in self.lang_token_to_id.items()} SCREAMING_SNAKE_CASE_ : str = src_lang if src_lang is not None else '''en''' SCREAMING_SNAKE_CASE_ : Optional[int] = tgt_lang SCREAMING_SNAKE_CASE_ : Any = self.get_lang_id(self._src_lang) self.set_src_lang_special_tokens(self._src_lang) SCREAMING_SNAKE_CASE_ : Tuple = num_madeup_words @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' return len(self.encoder) + len(self.lang_token_to_id) @property def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' return self._src_lang @src_lang.setter def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str): '''simple docstring''' return self.sp_model.encode(lowercase_ , out_type=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : int): '''simple docstring''' if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(lowercase_ , self.encoder[self.unk_token]) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : int): '''simple docstring''' if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(lowercase_ , self.unk_token) def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = [] SCREAMING_SNAKE_CASE_ : Dict = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowercase_) + token SCREAMING_SNAKE_CASE_ : str = [] else: current_sub_tokens.append(lowercase_) out_string += self.sp_model.decode(lowercase_) return out_string.strip() def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = [1] * len(self.prefix_tokens) SCREAMING_SNAKE_CASE_ : 
List[str] = [1] * len(self.suffix_tokens) if token_ids_a is None: return prefix_ones + ([0] * len(lowercase_)) + suffix_ones return prefix_ones + ([0] * len(lowercase_)) + ([0] * len(lowercase_)) + suffix_ones def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.__dict__.copy() SCREAMING_SNAKE_CASE_ : List[str] = None return state def __setstate__( self : Optional[int] , lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs'''): SCREAMING_SNAKE_CASE_ : Optional[int] = {} SCREAMING_SNAKE_CASE_ : str = load_spm(self.spm_file , self.sp_model_kwargs) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : str , lowercase_ : Optional[str] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = Path(lowercase_) if not save_dir.is_dir(): raise OSError(F'{save_directory} should be a directory') SCREAMING_SNAKE_CASE_ : Optional[int] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) SCREAMING_SNAKE_CASE_ : int = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , lowercase_) if os.path.abspath(self.spm_file) != os.path.abspath(lowercase_) and os.path.isfile(self.spm_file): copyfile(self.spm_file , lowercase_) elif not os.path.isfile(self.spm_file): with open(lowercase_ , '''wb''') as fi: SCREAMING_SNAKE_CASE_ : List[Any] = self.sp_model.serialized_model_proto() fi.write(lowercase_) return (str(lowercase_), str(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : List[str] , lowercase_ : str = "en" , lowercase_ : Optional[List[str]] = None , lowercase_ : str = "ro" , **lowercase_ : List[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = src_lang SCREAMING_SNAKE_CASE_ : int = tgt_lang self.set_src_lang_special_tokens(self.src_lang) return super().prepare_seqaseq_batch(lowercase_ , lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Dict , lowercase_ : Optional[str] , lowercase_ : Optional[str] , **lowercase_ : Optional[int]): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = src_lang SCREAMING_SNAKE_CASE_ : str = self(lowercase_ , add_special_tokens=lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.get_lang_id(lowercase_) SCREAMING_SNAKE_CASE_ : Any = tgt_lang_id return inputs def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' self.set_src_lang_special_tokens(self.src_lang) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' self.set_tgt_lang_special_tokens(self.tgt_lang) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ 
: str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_lang_token(lowercase_) SCREAMING_SNAKE_CASE_ : str = self.lang_token_to_id[lang_token] SCREAMING_SNAKE_CASE_ : List[Any] = [self.cur_lang_id] SCREAMING_SNAKE_CASE_ : Any = [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.get_lang_token(lowercase_) SCREAMING_SNAKE_CASE_ : Dict = self.lang_token_to_id[lang_token] SCREAMING_SNAKE_CASE_ : List[Any] = [self.cur_lang_id] SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : str): '''simple docstring''' return self.lang_code_to_token[lang] def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.get_lang_token(lowercase_) return self.lang_token_to_id[lang_token] def _A (__a , __a ) -> sentencepiece.SentencePieceProcessor: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = sentencepiece.SentencePieceProcessor(**__a ) spm.Load(str(__a ) ) return spm def _A (__a ) -> Union[Dict, List]: """simple docstring""" with open(__a , '''r''' ) as f: return json.load(__a ) def _A (__a , __a ) -> None: """simple docstring""" with open(__a , '''w''' ) as f: json.dump(__a , __a , indent=2 )
91
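A minimal usage sketch of the fast MBart tokenizer set up above, assuming the public facebook/mbart-large-en-ro checkpoint and the standard transformers API; per set_src_lang_special_tokens, source sentences are suffixed with the end-of-sentence token followed by the source language code.

from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok("UN Chief Says There Is No Plan to Stop Chemical Weapons in Syria", return_tensors="pt")
# expected suffix: '</s>', 'en_XX' (eos followed by the source language code)
print(tok.convert_ids_to_tokens(batch["input_ids"][0].tolist())[-2:])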
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase_ : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCAmelCase_ : Dict = { """vocab_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""", }, """merges_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""", }, """tokenizer_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""", }, } UpperCAmelCase_ : List[str] = { """gpt2""": 1024, """gpt2-medium""": 1024, """gpt2-large""": 1024, """gpt2-xl""": 1024, """distilgpt2""": 1024, } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ["input_ids", "attention_mask"] __UpperCamelCase = GPTaTokenizer def __init__( self : Optional[int] , lowercase_ : int=None , lowercase_ : List[str]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Tuple="<|endoftext|>" , lowercase_ : str="<|endoftext|>" , lowercase_ : Dict="<|endoftext|>" , lowercase_ : Tuple=False , **lowercase_ : Optional[int] , ): '''simple docstring''' super().__init__( lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('''add_bos_token''' , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('''add_prefix_space''' , lowercase_) != add_prefix_space: SCREAMING_SNAKE_CASE_ : int = getattr(lowercase_ , pre_tok_state.pop('''type''')) SCREAMING_SNAKE_CASE_ : str = add_prefix_space SCREAMING_SNAKE_CASE_ : Dict = pre_tok_class(**lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = add_prefix_space def _SCREAMING_SNAKE_CASE ( self : str , *lowercase_ : List[Any] , **lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = 
kwargs.get('''is_split_into_words''' , lowercase_) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *lowercase_ : List[str] , **lowercase_ : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = kwargs.get('''is_split_into_words''' , lowercase_) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , lowercase_ : Optional[str] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self._tokenizer.model.save(lowercase_ , name=lowercase_) return tuple(lowercase_) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : "Conversation"): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_) + [self.eos_token_id]) if len(lowercase_) > self.model_max_length: SCREAMING_SNAKE_CASE_ : Any = input_ids[-self.model_max_length :] return input_ids
91
1
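A small round-trip sketch for the fast GPT-2 tokenizer above, assuming the public "gpt2" checkpoint; note the add_prefix_space requirement that the assertions in _batch_encode_plus/_encode_plus enforce for pre-tokenized input.

from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2")
ids = tok.encode("Hello world")
assert tok.decode(ids) == "Hello world"  # byte-level BPE round-trips exactly

# pre-tokenized input only works when the tokenizer was built with add_prefix_space=True
tok_split = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
enc = tok_split(["Hello", "world"], is_split_into_words=True)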
"""simple docstring""" UpperCAmelCase_ : Any = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ UpperCAmelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}] UpperCAmelCase_ : Tuple = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
91
"""simple docstring""" import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(lowercase_ , '''hidden_sizes''')) self.parent.assertTrue(hasattr(lowercase_ , '''num_attention_heads''')) class lowerCAmelCase__ : '''simple docstring''' def __init__( self : str , lowercase_ : Union[str, Any] , lowercase_ : List[Any]=13 , lowercase_ : Dict=64 , lowercase_ : Dict=3 , lowercase_ : Optional[Any]=3 , lowercase_ : List[Any]=2 , lowercase_ : Any=1 , lowercase_ : List[Any]=16 , lowercase_ : int=[128, 256, 384] , lowercase_ : str=[4, 6, 8] , lowercase_ : Optional[Any]=[2, 3, 4] , lowercase_ : Union[str, Any]=[16, 16, 16] , lowercase_ : Optional[Any]=0 , lowercase_ : Optional[int]=[2, 2, 2] , lowercase_ : Any=[2, 2, 2] , lowercase_ : List[str]=0.02 , lowercase_ : Any=True , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[int]=2 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = parent SCREAMING_SNAKE_CASE_ : Any = batch_size SCREAMING_SNAKE_CASE_ : Optional[Any] = image_size SCREAMING_SNAKE_CASE_ : int = num_channels SCREAMING_SNAKE_CASE_ : List[Any] = kernel_size SCREAMING_SNAKE_CASE_ : Optional[Any] = stride SCREAMING_SNAKE_CASE_ : List[str] = padding SCREAMING_SNAKE_CASE_ : int = hidden_sizes SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads SCREAMING_SNAKE_CASE_ : int = depths SCREAMING_SNAKE_CASE_ : Optional[Any] = key_dim SCREAMING_SNAKE_CASE_ : Optional[Any] = drop_path_rate SCREAMING_SNAKE_CASE_ : Tuple = patch_size SCREAMING_SNAKE_CASE_ : Optional[Any] = attention_ratio SCREAMING_SNAKE_CASE_ : str = mlp_ratio SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range SCREAMING_SNAKE_CASE_ : List[Any] = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] SCREAMING_SNAKE_CASE_ : Any = is_training SCREAMING_SNAKE_CASE_ : Tuple = use_labels SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_labels SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_ : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , self.num_labels) SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : 
Optional[int]): '''simple docstring''' return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : int , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = LevitModel(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(lowercase_) SCREAMING_SNAKE_CASE_ : Any = (self.image_size, self.image_size) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = image_size[0], image_size[1] for _ in range(4): SCREAMING_SNAKE_CASE_ : List[Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1) SCREAMING_SNAKE_CASE_ : Dict = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]) , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.num_labels SCREAMING_SNAKE_CASE_ : Union[str, Any] = LevitForImageClassification(lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE_ : int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) __UpperCamelCase = ( { "feature-extraction": LevitModel, "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = LevitModelTester(self) SCREAMING_SNAKE_CASE_ : List[Any] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' return 
@unittest.skip(reason='''Levit does not use inputs_embeds''') def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' pass @unittest.skip(reason='''Levit does not support input and output embeddings''') def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' pass @unittest.skip(reason='''Levit does not output attentions''') def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Any = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ : Dict = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowercase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' def check_hidden_states_output(lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : str): SCREAMING_SNAKE_CASE_ : str = model_class(lowercase_) model.to(lowercase_) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Tuple = model(**self._prepare_for_class(lowercase_ , lowercase_)) SCREAMING_SNAKE_CASE_ : str = outputs.hidden_states SCREAMING_SNAKE_CASE_ : Optional[int] = len(self.model_tester.depths) + 1 self.assertEqual(len(lowercase_) , lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = (self.model_tester.image_size, self.model_tester.image_size) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_size[0], image_size[1] for _ in range(4): SCREAMING_SNAKE_CASE_ : Optional[Any] = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1) SCREAMING_SNAKE_CASE_ : Optional[int] = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [ height * width, self.model_tester.hidden_sizes[0], ] , ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Optional[int] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE_ : Tuple = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''') def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Tuple=False): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple 
docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : Union[str, Any] = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(lowercase_) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowercase_) model.to(lowercase_) model.train() SCREAMING_SNAKE_CASE_ : Optional[Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = model(**lowercase_).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE_ : Union[str, Any] = False SCREAMING_SNAKE_CASE_ : Optional[int] = True for model_class in self.all_model_classes: if model_class in get_values(lowercase_) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue SCREAMING_SNAKE_CASE_ : List[str] = model_class(lowercase_) model.gradient_checkpointing_enable() model.to(lowercase_) model.train() SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = model(**lowercase_).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : List[Any] = [ {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float}, {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long}, {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(lowercase_), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}'): SCREAMING_SNAKE_CASE_ : Optional[int] = problem_type['''title'''] SCREAMING_SNAKE_CASE_ : Optional[int] = problem_type['''num_labels'''] SCREAMING_SNAKE_CASE_ : str = model_class(lowercase_) model.to(lowercase_) model.train() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) if problem_type["num_labels"] > 1: SCREAMING_SNAKE_CASE_ : str = inputs['''labels'''].unsqueeze(1).repeat(1 , problem_type['''num_labels''']) SCREAMING_SNAKE_CASE_ : Any = inputs['''labels'''].to(problem_type['''dtype''']) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=lowercase_) as warning_list: SCREAMING_SNAKE_CASE_ : int = model(**lowercase_).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message): raise ValueError( F'Something is going wrong in the regression problem: intercepted {w.message}') loss.backward() @slow def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Optional[Any] = LevitModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) def _A () -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) @slow def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to( lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.default_image_processor SCREAMING_SNAKE_CASE_ : str = prepare_img() SCREAMING_SNAKE_CASE_ : List[Any] = image_processor(images=lowercase_ , return_tensors='''pt''').to(lowercase_) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Any = model(**lowercase_) # verify the logits SCREAMING_SNAKE_CASE_ : Tuple = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape , lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([1.04_48, -0.37_45, -1.83_17]).to(lowercase_) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
91
1
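A quick standalone check of the convolutional output-size formula the LeViT tester relies on; with the defaults above (image_size=64, kernel_size=3, stride=2, padding=1) the four-stage patch embedding shrinks the resolution from 64 down to 4.

from math import floor

def conv_out(size, kernel=3, stride=2, padding=1):
    return floor((size + 2 * padding - kernel) / stride) + 1

size = 64
for _ in range(4):
    size = conv_out(size)
assert size == 4  # 64 -> 32 -> 16 -> 8 -> 4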
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class lowerCAmelCase__ ( unittest.TestCase , UpperCAmelCase__ ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = load_tool('''text-classification''') self.tool.setup() SCREAMING_SNAKE_CASE_ : Tuple = load_tool('''text-classification''' , remote=lowercase_) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.tool('''That\'s quite cool''' , ['''positive''', '''negative''']) self.assertEqual(lowercase_ , '''positive''') def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.remote_tool('''That\'s quite cool''' , ['''positive''', '''negative''']) self.assertEqual(lowercase_ , '''positive''') def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative''']) self.assertEqual(lowercase_ , '''positive''') def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.remote_tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative''']) self.assertEqual(lowercase_ , '''positive''')
91
"""simple docstring""" from math import factorial def _A (__a = 20 ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... SCREAMING_SNAKE_CASE_ : List[str] = n // 2 return int(factorial(__a ) / (factorial(__a ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(20)) else: try: UpperCAmelCase_ : List[str] = int(sys.argv[1]) print(solution(n)) except ValueError: print("""Invalid entry - please enter a number.""")
91
1
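A cross-check of the closed form above, assuming the intended quantity is the central binomial coefficient C(2n, n), i.e. the number of lattice paths through an n x n grid.

from math import comb, factorial

def central_binomial(n: int) -> int:
    return factorial(2 * n) // (factorial(n) ** 2)

assert central_binomial(4) == comb(8, 4) == 70
assert central_binomial(20) == comb(40, 20) == 137846528820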
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable UpperCAmelCase_ : Optional[int] = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Optional[Any] = ["""DPTFeatureExtractor"""] UpperCAmelCase_ : Optional[Any] = ["""DPTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Optional[int] = [ """DPT_PRETRAINED_MODEL_ARCHIVE_LIST""", """DPTForDepthEstimation""", """DPTForSemanticSegmentation""", """DPTModel""", """DPTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys UpperCAmelCase_ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
91
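A simplified sketch of the lazy-import idea behind _LazyModule, using PEP 562 module-level __getattr__; this is an illustration only, not the actual transformers implementation, and it would live in a package __init__.py.

import importlib

_import_structure = {"configuration_dpt": ["DPTConfig"]}

def __getattr__(name):
    # resolve exported names to their submodule on first access
    for submodule, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(f".{submodule}", __package__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")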
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor UpperCAmelCase_ : Any = logging.get_logger(__name__) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Union[str, Any] , *lowercase_ : List[str] , **lowercase_ : List[str]): '''simple docstring''' warnings.warn( '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use SegformerImageProcessor instead.''' , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_)
91
1
"""simple docstring""" class lowerCAmelCase__ : # Public class to implement a graph '''simple docstring''' def __init__( self : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : list[list[bool]]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = row SCREAMING_SNAKE_CASE_ : Tuple = col SCREAMING_SNAKE_CASE_ : Union[str, Any] = graph def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : list[list[bool]]): '''simple docstring''' return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : list[list[bool]]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order SCREAMING_SNAKE_CASE_ : Tuple = [-1, 0, 1, -1, 1, -1, 0, 1] SCREAMING_SNAKE_CASE_ : Tuple = True # Make those cells visited for k in range(8): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , lowercase_): self.diffs(i + row_nbr[k] , j + col_nbr[k] , lowercase_) def _SCREAMING_SNAKE_CASE ( self : int): # And finally, count all islands. '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = [[False for j in range(self.COL)] for i in range(self.ROW)] SCREAMING_SNAKE_CASE_ : Dict = 0 for i in range(self.ROW): for j in range(self.COL): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(lowercase_ , lowercase_ , lowercase_) count += 1 return count
91
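A standalone sketch of the same 8-connected flood fill, with hypothetical helper names chosen for readability; it mirrors the is_safe/diffs/count logic of the class above.

def count_islands(grid):
    rows, cols = len(grid), len(grid[0])
    seen = [[False] * cols for _ in range(rows)]

    def dfs(i, j):
        # stop at the border, at water, or at already-visited cells
        if not (0 <= i < rows and 0 <= j < cols) or seen[i][j] or not grid[i][j]:
            return
        seen[i][j] = True
        for di in (-1, 0, 1):
            for dj in (-1, 0, 1):
                if di or dj:
                    dfs(i + di, j + dj)

    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] and not seen[i][j]:
                dfs(i, j)
                count += 1
    return count

assert count_islands([[1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]]) == 2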
"""simple docstring""" from __future__ import annotations class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , lowercase_ : int = 0): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = key def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(lowercase_) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(lowercase_) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : int = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned SCREAMING_SNAKE_CASE_ : List[str] = '''''' for ch in content: ans += chr(ord(lowercase_) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned SCREAMING_SNAKE_CASE_ : List[Any] = '''''' for ch in content: ans += chr(ord(lowercase_) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) try: with open(lowercase_) as fin, open('''encrypt.out''' , '''w+''') as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(lowercase_ , lowercase_)) except OSError: return False return True def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) try: with open(lowercase_) as fin, open('''decrypt.out''' , '''w+''') as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(lowercase_ , lowercase_)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
91
1
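A minimal round-trip sketch of the XOR idea used throughout the cipher class above (key reduced to a single byte, mirroring the key %= 255 step): applying the same transformation twice restores the original text.

def xor_string(content: str, key: int) -> str:
    key %= 255  # keep the key in byte range, as the class does
    return "".join(chr(ord(ch) ^ key) for ch in content)

message = "hallo welt"
assert xor_string(xor_string(message, 67), 67) == message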
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) def _A (__a , __a ) -> Tuple: """simple docstring""" try: with open(__a , '''rb''' ) as flax_state_f: SCREAMING_SNAKE_CASE_ : Optional[int] = from_bytes(__a , flax_state_f.read() ) except UnpicklingError as e: try: with open(__a ) as f: if f.read().startswith('''version''' ): raise OSError( '''You seem to have cloned a repository without having git-lfs installed. Please''' ''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the''' ''' folder you cloned.''' ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(__a , __a ) def _A (__a , __a ) -> Tuple: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights SCREAMING_SNAKE_CASE_ : Optional[int] = flatten_dict(jax.tree_util.tree_map(lambda __a : x.dtype == jnp.bfloataa , __a ) ).values() if any(__a ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = jax.tree_util.tree_map( lambda __a : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __a ) SCREAMING_SNAKE_CASE_ : int = '''''' SCREAMING_SNAKE_CASE_ : str = flatten_dict(__a , sep='''.''' ) SCREAMING_SNAKE_CASE_ : List[Any] = pt_model.state_dict() # keep track of unexpected & missing keys SCREAMING_SNAKE_CASE_ : str = [] SCREAMING_SNAKE_CASE_ : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): SCREAMING_SNAKE_CASE_ : Any = flax_key_tuple.split('''.''' ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: SCREAMING_SNAKE_CASE_ : Any = flax_key_tuple_array[:-1] + ['''weight'''] SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.transpose(__a , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": SCREAMING_SNAKE_CASE_ : Tuple = flax_key_tuple_array[:-1] + ['''weight'''] SCREAMING_SNAKE_CASE_ : Optional[int] = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": SCREAMING_SNAKE_CASE_ : Optional[int] = flax_key_tuple_array[:-1] + ['''weight'''] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(__a ): SCREAMING_SNAKE_CASE_ : List[str] = ( flax_key_tuple_string.replace('''_0''' , '''.0''' ) .replace('''_1''' , '''.1''' ) .replace('''_2''' , '''.2''' ) .replace('''_3''' , '''.3''' ) .replace('''_4''' , '''.4''' ) .replace('''_5''' , '''.5''' ) .replace('''_6''' , '''.6''' ) .replace('''_7''' , '''.7''' ) .replace('''_8''' , '''.8''' ) .replace('''_9''' , '''.9''' ) ) SCREAMING_SNAKE_CASE_ : Optional[Any] = '''.'''.join(__a ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f'Flax checkpoint seems to be 
incorrect. Weight {flax_key_tuple} was expected ' f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict SCREAMING_SNAKE_CASE_ : Optional[int] = np.asarray(__a ) if not isinstance(__a , np.ndarray ) else flax_tensor SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.from_numpy(__a ) # remove from missing keys missing_keys.remove(__a ) else: # weight is not expected by PyTorch model unexpected_keys.append(__a ) pt_model.load_state_dict(__a ) # re-transform missing_keys to list SCREAMING_SNAKE_CASE_ : int = list(__a ) if len(__a ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) if len(__a ) > 0: logger.warning( f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) return pt_model
91
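A small illustration of why dense kernels are transposed during the conversion above, assuming the usual layout conventions: Flax stores Dense kernels as (in_features, out_features), while torch.nn.Linear.weight is (out_features, in_features).

import numpy as np
import torch

flax_kernel = np.random.randn(4, 3).astype(np.float32)  # (in, out)
pt_weight = torch.from_numpy(flax_kernel.T)              # (out, in)

x = np.random.randn(2, 4).astype(np.float32)
np.testing.assert_allclose(
    x @ flax_kernel,                              # Flax convention: x @ kernel
    (torch.from_numpy(x) @ pt_weight.T).numpy(),  # PyTorch convention: x @ weight.T
    rtol=1e-5,
)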
"""simple docstring""" def _A (__a = 50 ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f'''{solution() = }''')
91
1
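An independent cross-check of the counting above, assuming the Project Euler 116-style statement (a row of black unit squares where runs may be replaced by coloured tiles of length 2, 3 or 4, colours never mixed, at least one coloured tile used): the worked example for a row of length five gives 7 + 3 + 2 = 12.

def ways(length: int, tile: int) -> int:
    dp = [0] * (length + 1)
    for n in range(length + 1):
        dp[n] = 1 if n < tile else dp[n - 1] + dp[n - tile]
    return dp[length] - 1  # drop the all-black tiling

assert [ways(5, t) for t in (2, 3, 4)] == [7, 3, 2]
assert sum(ways(5, t) for t in (2, 3, 4)) == 12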
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : str = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "megatron-bert" def __init__( self : int , lowercase_ : int=29056 , lowercase_ : Dict=1024 , lowercase_ : Optional[Any]=24 , lowercase_ : Union[str, Any]=16 , lowercase_ : Optional[int]=4096 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : int=512 , lowercase_ : Optional[Any]=2 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Optional[Any]=1e-12 , lowercase_ : Optional[Any]=0 , lowercase_ : Dict="absolute" , lowercase_ : Any=True , **lowercase_ : List[str] , ): '''simple docstring''' super().__init__(pad_token_id=lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = vocab_size SCREAMING_SNAKE_CASE_ : List[str] = hidden_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE_ : Any = num_attention_heads SCREAMING_SNAKE_CASE_ : str = hidden_act SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings SCREAMING_SNAKE_CASE_ : Optional[int] = type_vocab_size SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range SCREAMING_SNAKE_CASE_ : Optional[int] = layer_norm_eps SCREAMING_SNAKE_CASE_ : Any = position_embedding_type SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_cache
91
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = (PNDMScheduler,) __UpperCamelCase = (("num_inference_steps", 5_0),) def _SCREAMING_SNAKE_CASE ( self : Any , **lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = { '''num_train_timesteps''': 1000, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**lowercase_) return config def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[str]=0 , **lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('''num_inference_steps''' , lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_sample SCREAMING_SNAKE_CASE_ : List[Any] = 0.1 * sample SCREAMING_SNAKE_CASE_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config(**lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # copy over dummy past residuals SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class.from_pretrained(lowercase_) new_scheduler.set_timesteps(lowercase_) # copy over dummy past residuals SCREAMING_SNAKE_CASE_ : Optional[Any] = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str]=0 , **lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop('''num_inference_steps''' , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = 0.1 * sample SCREAMING_SNAKE_CASE_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # copy over dummy past residuals (must be after setting timesteps) SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_) SCREAMING_SNAKE_CASE_ : str = scheduler_class.from_pretrained(lowercase_) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_) # copy over dummy past residual (must be after setting 
timesteps) SCREAMING_SNAKE_CASE_ : Any = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Tuple = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : str , **lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_scheduler_config(**lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class(**lowercase_) SCREAMING_SNAKE_CASE_ : Dict = 10 SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_model() SCREAMING_SNAKE_CASE_ : str = self.dummy_sample_deter scheduler.set_timesteps(lowercase_) for i, t in enumerate(scheduler.prk_timesteps): SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : str = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample for i, t in enumerate(scheduler.plms_timesteps): SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_).prev_sample return sample def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''num_inference_steps''' , lowercase_) for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler_class(**lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_sample SCREAMING_SNAKE_CASE_ : Any = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase_ , '''set_timesteps'''): scheduler.set_timesteps(lowercase_) elif num_inference_steps is not None and not hasattr(lowercase_ , '''set_timesteps'''): SCREAMING_SNAKE_CASE_ : Optional[Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) SCREAMING_SNAKE_CASE_ : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] SCREAMING_SNAKE_CASE_ : Optional[int] = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Dict = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : List[Any] = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Any = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' for steps_offset in [0, 1]: 
self.check_over_configs(steps_offset=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : List[str] = self.get_scheduler_config(steps_offset=1) SCREAMING_SNAKE_CASE_ : Tuple = scheduler_class(**lowercase_) scheduler.set_timesteps(10) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , ) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02]): self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' for t in [1, 5, 10]: self.check_over_forward(time_step=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]): self.check_over_forward(num_inference_steps=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = 27 for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_sample SCREAMING_SNAKE_CASE_ : str = 0.1 * sample SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2]): SCREAMING_SNAKE_CASE_ : int = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' with self.assertRaises(lowercase_): SCREAMING_SNAKE_CASE_ : int = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : List[str] = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Dict = scheduler_class(**lowercase_) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.full_loop() SCREAMING_SNAKE_CASE_ : List[Any] = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 1_98.13_18) < 1e-2 assert abs(result_mean.item() - 0.25_80) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.full_loop(prediction_type='''v_prediction''') SCREAMING_SNAKE_CASE_ : str = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 67.39_86) < 1e-2 assert abs(result_mean.item() - 0.08_78) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01) SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 2_30.03_99) < 1e-2 assert abs(result_mean.item() - 0.29_95) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' 
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01) SCREAMING_SNAKE_CASE_ : int = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : List[str] = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 1_86.94_82) < 1e-2 assert abs(result_mean.item() - 0.24_34) < 1e-3
91
1
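A minimal hands-on sketch of the scheduler configuration exercised above, assuming the public diffusers PNDMScheduler API; after set_timesteps the Runge-Kutta warm-up steps (prk_timesteps) are followed by the plain PLMS steps, mirroring the two loops in the full-loop helper.

from diffusers import PNDMScheduler

scheduler = PNDMScheduler(
    num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)
# warm-up steps first, then the PLMS steps used for the rest of sampling
print(len(scheduler.prk_timesteps), len(scheduler.plms_timesteps))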
"""simple docstring""" from __future__ import annotations import typing from collections.abc import Iterable import numpy as np UpperCAmelCase_ : int = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 UpperCAmelCase_ : str = typing.Union[np.floataa, int, float] # noqa: UP007 def _A (__a , __a ) -> VectorOut: """simple docstring""" return np.sqrt(np.sum((np.asarray(__a ) - np.asarray(__a )) ** 2 ) ) def _A (__a , __a ) -> VectorOut: """simple docstring""" return sum((va - va) ** 2 for va, va in zip(__a , __a ) ) ** (1 / 2) if __name__ == "__main__": def _A () -> None: """simple docstring""" from timeit import timeit print('''Without Numpy''' ) print( timeit( '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) ) print('''With Numpy''' ) print( timeit( '''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) ) benchmark()
91
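A quick agreement check between the two formulations above (pure Python and numpy) for the vectors used in the benchmark.

import numpy as np

a, b = [1, 2, 3], [4, 5, 6]
pure = sum((x - y) ** 2 for x, y in zip(a, b)) ** 0.5
with_np = np.sqrt(np.sum((np.asarray(a) - np.asarray(b)) ** 2))
assert np.isclose(pure, with_np)
assert round(pure, 6) == round(27 ** 0.5, 6)  # distance is sqrt(27) ~ 5.196152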
"""simple docstring""" import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @parameterized.expand([(None,), ('''foo.json''',)]) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ , config_name=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , lowercase_) self.assertEqual(loaded_config.temperature , 0.7) self.assertEqual(loaded_config.length_penalty , 1.0) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50) self.assertEqual(loaded_config.max_length , 20) self.assertEqual(loaded_config.max_time , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoConfig.from_pretrained('''gpt2''') SCREAMING_SNAKE_CASE_ : int = GenerationConfig.from_model_config(lowercase_) SCREAMING_SNAKE_CASE_ : int = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(lowercase_ , lowercase_) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = GenerationConfig() SCREAMING_SNAKE_CASE_ : Any = { '''max_new_tokens''': 1024, '''foo''': '''bar''', } SCREAMING_SNAKE_CASE_ : str = copy.deepcopy(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = generation_config.update(**lowercase_) # update_kwargs was not modified (no side effects) self.assertEqual(lowercase_ , lowercase_) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024) # `.update()` returns a dictionary of unused kwargs self.assertEqual(lowercase_ , {'''foo''': '''bar'''}) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = GenerationConfig() SCREAMING_SNAKE_CASE_ : List[str] = '''bar''' with tempfile.TemporaryDirectory('''test-generation-config''') as tmp_dir: generation_config.save_pretrained(lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = GenerationConfig.from_pretrained(lowercase_) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , '''bar''') SCREAMING_SNAKE_CASE_ : Optional[Any] = GenerationConfig.from_model_config(lowercase_) assert not hasattr(lowercase_ , '''foo''') # no new kwargs should be initialized if from config def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig() self.assertEqual(default_config.temperature , 1.0) 
self.assertEqual(default_config.do_sample , lowercase_) self.assertEqual(default_config.num_beams , 1) SCREAMING_SNAKE_CASE_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7) self.assertEqual(config.do_sample , lowercase_) self.assertEqual(config.num_beams , 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0) self.assertEqual(loaded_config.temperature , 1.0) self.assertEqual(loaded_config.do_sample , lowercase_) self.assertEqual(loaded_config.num_beams , 1) # default value @is_staging_test class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = TOKEN HfFolder.save_token(lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str]): '''simple docstring''' try: delete_repo(token=cls._token , repo_id='''test-generation-config''') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''') except HTTPError: pass def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''test-generation-config''' , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : int = GenerationConfig.from_pretrained(F'{USER}/test-generation-config') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) # Reset repo delete_repo(token=self._token , repo_id='''test-generation-config''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id='''test-generation-config''' , push_to_hub=lowercase_ , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : Optional[int] = GenerationConfig.from_pretrained(F'{USER}/test-generation-config') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : Any = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=lowercase_ , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : Any = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
91
1
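A minimal save/load round trip mirroring the first test above, assuming the public transformers GenerationConfig API and a writable temporary directory; fields that were not specified keep their defaults.

import tempfile
from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir)

assert loaded.do_sample is True and loaded.temperature == 0.7
assert loaded.top_k == 50 and loaded.max_length == 20  # untouched defaults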
"""simple docstring""" import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def _A (__a , __a , __a ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = LxmertConfig.from_json_file(__a ) print(f'Building PyTorch model from configuration: {config}' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = LxmertForPreTraining(__a ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(__a , __a , __a ) # Save pytorch-model print(f'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , __a ) if __name__ == "__main__": UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase_ : Union[str, Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
91
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets UpperCAmelCase_ : Optional[Any] = datasets.logging.get_logger(__name__) UpperCAmelCase_ : List[str] = """\ @InProceedings{moosavi2019minimum, author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube}, title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection}, year = {2019}, booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, publisher = {Association for Computational Linguistics}, address = {Florence, Italy}, } @inproceedings{10.3115/1072399.1072405, author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette}, title = {A Model-Theoretic Coreference Scoring Scheme}, year = {1995}, isbn = {1558604022}, publisher = {Association for Computational Linguistics}, address = {USA}, url = {https://doi.org/10.3115/1072399.1072405}, doi = {10.3115/1072399.1072405}, booktitle = {Proceedings of the 6th Conference on Message Understanding}, pages = {45–52}, numpages = {8}, location = {Columbia, Maryland}, series = {MUC6 ’95} } @INPROCEEDINGS{Bagga98algorithmsfor, author = {Amit Bagga and Breck Baldwin}, title = {Algorithms for Scoring Coreference Chains}, booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference}, year = {1998}, pages = {563--566} } @INPROCEEDINGS{Luo05oncoreference, author = {Xiaoqiang Luo}, title = {On coreference resolution performance metrics}, booktitle = {In Proc. of HLT/EMNLP}, year = {2005}, pages = {25--32}, publisher = {URL} } @inproceedings{moosavi-strube-2016-coreference, title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\", author = \"Moosavi, Nafise Sadat and Strube, Michael\", booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\", month = aug, year = \"2016\", address = \"Berlin, Germany\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/P16-1060\", doi = \"10.18653/v1/P16-1060\", pages = \"632--642\", } """ UpperCAmelCase_ : Tuple = """\ CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which implements of the common evaluation metrics including MUC [Vilain et al, 1995], B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005], LEA [Moosavi and Strube, 2016] and the averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) [Denis and Baldridge, 2009a; Pradhan et al., 2011]. This wrapper of CoVal currently only work with CoNLL line format: The CoNLL format has one word per line with all the annotation for this word in column separated by spaces: Column Type Description 1 Document ID This is a variation on the document filename 2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc. 3 Word number 4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release. 
5 Part-of-Speech 6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column. 7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\" 8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7. 9 Word sense This is the word sense of the word in Column 3. 10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data. 11 Named Entities These columns identifies the spans representing various named entities. 12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7. N Coreference Coreference chain information encoded in a parenthesis structure. More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md CoVal code was written by @ns-moosavi. Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py The test suite is taken from https://github.com/conll/reference-coreference-scorers/ Mention evaluation and the test suite are added by @andreasvc. Parsing CoNLL files is developed by Leo Born. """ UpperCAmelCase_ : Union[str, Any] = """ Calculates coreference evaluation metrics. Args: predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format. Each prediction is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format. Each reference is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. keep_singletons: After extracting all mentions of key or system files, mentions whose corresponding coreference chain is of size one, are considered as singletons. The default evaluation mode will include singletons in evaluations if they are included in the key or the system files. By setting 'keep_singletons=False', all singletons in the key and system files will be excluded from the evaluation. NP_only: Most of the recent coreference resolvers only resolve NP mentions and leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs. min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans. Minimum spans are determined using the MINA algorithm. 
Returns: 'mentions': mentions 'muc': MUC metric [Vilain et al, 1995] 'bcub': B-cubed [Bagga and Baldwin, 1998] 'ceafe': CEAFe [Luo et al., 2005] 'lea': LEA [Moosavi and Strube, 2016] 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) Examples: >>> coval = datasets.load_metric('coval') >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -', ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)', ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)', ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -', ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -', ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -'] >>> references = [words] >>> predictions = [words] >>> results = coval.compute(predictions=predictions, references=references) >>> print(results) # doctest:+ELLIPSIS {'mentions/recall': 1.0,[...] 'conll_score': 100.0} """ def _A (__a , __a , __a=False , __a=False , __a=True , __a=False , __a="dummy_doc" ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = {doc: key_lines} SCREAMING_SNAKE_CASE_ : List[str] = {doc: sys_lines} SCREAMING_SNAKE_CASE_ : Dict = {} SCREAMING_SNAKE_CASE_ : Dict = 0 SCREAMING_SNAKE_CASE_ : List[str] = 0 SCREAMING_SNAKE_CASE_ : Tuple = 0 SCREAMING_SNAKE_CASE_ : int = 0 SCREAMING_SNAKE_CASE_ : List[str] = 0 SCREAMING_SNAKE_CASE_ : Any = 0 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = reader.get_doc_mentions(__a , key_doc_lines[doc] , __a ) key_singletons_num += singletons_num if NP_only or min_span: SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.set_annotated_parse_trees(__a , key_doc_lines[doc] , __a , __a ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = reader.get_doc_mentions(__a , sys_doc_lines[doc] , __a ) sys_singletons_num += singletons_num if NP_only or min_span: SCREAMING_SNAKE_CASE_ : Union[str, Any] = reader.set_annotated_parse_trees(__a , key_doc_lines[doc] , __a , __a ) if remove_nested: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = reader.remove_nested_coref_mentions(__a , __a ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = reader.remove_nested_coref_mentions(__a , __a ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.get_mention_assignments(__a , __a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.get_mention_assignments(__a , __a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( '''Number of removed nested coreferring mentions in the key ''' f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' ) logger.info( '''Number of resulting singleton clusters in the key ''' f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' ) if not keep_singletons: logger.info( f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ' '''files, respectively''' ) return doc_coref_infos def _A (__a , __a , __a , __a , __a , __a , __a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = get_coref_infos(__a , __a , __a , __a , __a , __a ) SCREAMING_SNAKE_CASE_ : str = {} SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0 
SCREAMING_SNAKE_CASE_ : str = 0 for name, metric in metrics: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = evaluator.evaluate_documents(__a , __a , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': fa} ) logger.info( name.ljust(10 ) , f'Recall: {recall * 1_00:.2f}' , f' Precision: {precision * 1_00:.2f}' , f' F1: {fa * 1_00:.2f}' , ) if conll_subparts_num == 3: SCREAMING_SNAKE_CASE_ : Tuple = (conll / 3) * 1_00 logger.info(f'CoNLL score: {conll:.2f}' ) output_scores.update({'''conll_score''': conll} ) return output_scores def _A (__a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = False for line in key_lines: if not line.startswith('''#''' ): if len(line.split() ) > 6: SCREAMING_SNAKE_CASE_ : Any = line.split()[5] if not parse_col == "-": SCREAMING_SNAKE_CASE_ : Any = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''')), '''references''': datasets.Sequence(datasets.Value('''string''')), }) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[ '''https://github.com/ns-moosavi/coval''', '''https://www.aclweb.org/anthology/P16-1060''', '''http://www.conll.cemantix.org/2012/data.html''', ] , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Dict=True , lowercase_ : Optional[Any]=False , lowercase_ : Optional[Any]=False , lowercase_ : Dict=False): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = [ ('''mentions''', evaluator.mentions), ('''muc''', evaluator.muc), ('''bcub''', evaluator.b_cubed), ('''ceafe''', evaluator.ceafe), ('''lea''', evaluator.lea), ] if min_span: SCREAMING_SNAKE_CASE_ : Union[str, Any] = util.check_gold_parse_annotation(lowercase_) if not has_gold_parse: raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''') # util.parse_key_file(key_file) # key_file = key_file + ".parsed" SCREAMING_SNAKE_CASE_ : Optional[Any] = evaluate( key_lines=lowercase_ , sys_lines=lowercase_ , metrics=lowercase_ , NP_only=lowercase_ , remove_nested=lowercase_ , keep_singletons=lowercase_ , min_span=lowercase_ , ) return score
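For reference, the averaged CoNLL score computed above is simply the mean of the MUC, B-cubed and CEAFe F1 values reported on a 0-100 scale; the per-metric F1 values below are made up:

# Hypothetical per-metric F1 scores (fractions in [0, 1])
muc_f1, bcub_f1, ceafe_f1 = 0.71, 0.64, 0.68
conll_score = (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100  # same (conll / 3) * 100 step as above
print(f"conll_score: {conll_score:.2f}")  # conll_score: 67.67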
91
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : int = logging.get_logger(__name__) UpperCAmelCase_ : Optional[int] = { """vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""", # See all GLPN models at https://huggingface.co/models?filter=glpn } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "glpn" def __init__( self : Tuple , lowercase_ : Union[str, Any]=3 , lowercase_ : Tuple=4 , lowercase_ : Dict=[2, 2, 2, 2] , lowercase_ : str=[8, 4, 2, 1] , lowercase_ : Optional[Any]=[32, 64, 160, 256] , lowercase_ : Dict=[7, 3, 3, 3] , lowercase_ : List[Any]=[4, 2, 2, 2] , lowercase_ : List[Any]=[1, 2, 5, 8] , lowercase_ : str=[4, 4, 4, 4] , lowercase_ : Any="gelu" , lowercase_ : Union[str, Any]=0.0 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Dict=0.1 , lowercase_ : Any=1e-6 , lowercase_ : Dict=64 , lowercase_ : Union[str, Any]=10 , lowercase_ : Dict=-1 , **lowercase_ : Dict , ): '''simple docstring''' super().__init__(**lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = num_channels SCREAMING_SNAKE_CASE_ : str = num_encoder_blocks SCREAMING_SNAKE_CASE_ : Union[str, Any] = depths SCREAMING_SNAKE_CASE_ : Dict = sr_ratios SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_sizes SCREAMING_SNAKE_CASE_ : Union[str, Any] = patch_sizes SCREAMING_SNAKE_CASE_ : List[Any] = strides SCREAMING_SNAKE_CASE_ : int = mlp_ratios SCREAMING_SNAKE_CASE_ : List[str] = num_attention_heads SCREAMING_SNAKE_CASE_ : List[str] = hidden_act SCREAMING_SNAKE_CASE_ : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range SCREAMING_SNAKE_CASE_ : Union[str, Any] = drop_path_rate SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_eps SCREAMING_SNAKE_CASE_ : Any = decoder_hidden_size SCREAMING_SNAKE_CASE_ : Dict = max_depth SCREAMING_SNAKE_CASE_ : Optional[Any] = head_in_index
91
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : Tuple = """▁""" UpperCAmelCase_ : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""} UpperCAmelCase_ : str = { """vocab_file""": { """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""", } } UpperCAmelCase_ : str = { """facebook/xglm-564M""": 2048, } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ["input_ids", "attention_mask"] def __init__( self : List[Any] , lowercase_ : str , lowercase_ : Tuple="<s>" , lowercase_ : Any="</s>" , lowercase_ : Optional[int]="</s>" , lowercase_ : List[Any]="<s>" , lowercase_ : Union[str, Any]="<unk>" , lowercase_ : Union[str, Any]="<pad>" , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : Tuple , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer SCREAMING_SNAKE_CASE_ : List[str] = 7 SCREAMING_SNAKE_CASE_ : Tuple = [F'<madeupword{i}>' for i in range(self.num_madeup_words)] SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.get('''additional_special_tokens''' , []) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(lowercase_)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1 # Mimic fairseq token-to-id alignment for the first 4 token SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} SCREAMING_SNAKE_CASE_ : List[Any] = len(self.sp_model) SCREAMING_SNAKE_CASE_ : Optional[Any] = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)} self.fairseq_tokens_to_ids.update(lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.__dict__.copy() SCREAMING_SNAKE_CASE_ : str = None SCREAMING_SNAKE_CASE_ : Optional[int] = self.sp_model.serialized_model_proto() return state def __setstate__( self : Tuple , lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs'''): SCREAMING_SNAKE_CASE_ : Union[str, Any] = {} SCREAMING_SNAKE_CASE_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None): '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_) if token_ids_a is None: return [1] + ([0] * len(lowercase_)) return [1] + ([0] * len(lowercase_)) + [1, 1] + ([0] * len(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a) * [0] @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : str): '''simple docstring''' return self.sp_model.encode(lowercase_ , out_type=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Union[str, Any]): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE_ : Optional[Any] = self.sp_model.PieceToId(lowercase_) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[Any]): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def 
_SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = ''''''.join(lowercase_).replace(lowercase_ , ''' ''').strip() return out_string def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None): '''simple docstring''' if not os.path.isdir(lowercase_): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join( lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , lowercase_) elif not os.path.isfile(self.vocab_file): with open(lowercase_ , '''wb''') as fi: SCREAMING_SNAKE_CASE_ : int = self.sp_model.serialized_model_proto() fi.write(lowercase_) return (out_vocab_file,)
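A minimal usage sketch for the tokenizer defined above (assumes hub access to the facebook/xglm-564M checkpoint referenced in the vocab map):

from transformers import XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
encoded = tokenizer("Hello world")
print(encoded["input_ids"])                     # ids shifted by the fairseq offset described above
print(tokenizer.decode(encoded["input_ids"]))   # round-trips back to the input text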
91
1
"""simple docstring""" from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING UpperCAmelCase_ : int = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase__ ) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[Any] , *lowercase_ : Optional[int] , **lowercase_ : List[str]): '''simple docstring''' super().__init__(*lowercase_ , **lowercase_) requires_backends(self , '''vision''') self.check_model_type(lowercase_) def __call__( self : List[Any] , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : Any): '''simple docstring''' return super().__call__(lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **lowercase_ : List[Any]): '''simple docstring''' return {}, {}, {} def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = load_image(lowercase_) SCREAMING_SNAKE_CASE_ : Dict = image.size SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processor(images=lowercase_ , return_tensors=self.framework) return model_inputs def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.model(**lowercase_) return model_outputs def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_outputs.predicted_depth SCREAMING_SNAKE_CASE_ : Tuple = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = prediction.squeeze().cpu().numpy() SCREAMING_SNAKE_CASE_ : str = (output * 255 / np.max(lowercase_)).astype('''uint8''') SCREAMING_SNAKE_CASE_ : int = Image.fromarray(lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = {} SCREAMING_SNAKE_CASE_ : Tuple = predicted_depth SCREAMING_SNAKE_CASE_ : str = depth return output_dict
91
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Dict = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = [ 
'''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] SCREAMING_SNAKE_CASE_ : List[Any] = '''fp16''' self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] SCREAMING_SNAKE_CASE_ : List[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : str = '''fp16''' self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_))
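A direct call to the helper the tests above exercise (assuming the diffusers version under test, where the helper takes the flat filename list and an optional variant); it answers whether every PyTorch .bin weight file has a .safetensors counterpart:

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible

print(is_safetensors_compatible([
    "unet/diffusion_pytorch_model.bin",
    "unet/diffusion_pytorch_model.safetensors",
]))  # True - the .bin has a safetensors twin
print(is_safetensors_compatible([
    "unet/diffusion_pytorch_model.bin",
]))  # False - no safetensors counterpart for the .bin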
91
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase_ : Any = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["pixel_values"] def __init__( self : Dict , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = True , **lowercase_ : Any , ): '''simple docstring''' super().__init__(**lowercase_) SCREAMING_SNAKE_CASE_ : Dict = size if size is not None else {'''shortest_edge''': 224} SCREAMING_SNAKE_CASE_ : List[str] = get_size_dict(lowercase_ , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : str = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} SCREAMING_SNAKE_CASE_ : List[Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name='''crop_size''') SCREAMING_SNAKE_CASE_ : List[Any] = do_resize SCREAMING_SNAKE_CASE_ : Tuple = size SCREAMING_SNAKE_CASE_ : str = resample SCREAMING_SNAKE_CASE_ : Any = do_center_crop SCREAMING_SNAKE_CASE_ : Any = crop_size SCREAMING_SNAKE_CASE_ : Dict = do_rescale SCREAMING_SNAKE_CASE_ : Any = rescale_factor SCREAMING_SNAKE_CASE_ : Optional[Any] = do_normalize SCREAMING_SNAKE_CASE_ : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN SCREAMING_SNAKE_CASE_ : str = image_std if image_std is not None else OPENAI_CLIP_STD SCREAMING_SNAKE_CASE_ : str = do_convert_rgb def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(lowercase_ , default_to_square=lowercase_) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}') SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_resize_output_image_size(lowercase_ , size=size['''shortest_edge'''] , default_to_square=lowercase_) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Dict , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(lowercase_) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}') return center_crop(lowercase_ , size=(size['''height'''], size['''width''']) , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[Any] , ): '''simple docstring''' return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Dict , ): '''simple docstring''' return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowercase_ : Union[str, Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE_ : int = size if size is not None else self.size SCREAMING_SNAKE_CASE_ : Any = get_size_dict(lowercase_ , param_name='''size''' , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : int = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE_ : List[str] = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(lowercase_ , param_name='''crop_size''' , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE_ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE_ : Optional[int] = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE_ : List[str] = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE_ : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb SCREAMING_SNAKE_CASE_ : Optional[int] = make_list_of_images(lowercase_) if not valid_images(lowercase_): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''') if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''') # PIL RGBA images are converted to RGB if do_convert_rgb: SCREAMING_SNAKE_CASE_ : int = [convert_to_rgb(lowercase_) for image in images] # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE_ : Tuple = [to_numpy_array(lowercase_) for image in images] if do_resize: SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images] if do_center_crop: SCREAMING_SNAKE_CASE_ : str = [self.center_crop(image=lowercase_ , size=lowercase_) for image in images] if do_rescale: SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.rescale(image=lowercase_ , scale=lowercase_) for image in images] if do_normalize: SCREAMING_SNAKE_CASE_ : Dict = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images] SCREAMING_SNAKE_CASE_ : List[str] = [to_channel_dimension_format(lowercase_ , lowercase_) for image in images] SCREAMING_SNAKE_CASE_ : Tuple = {'''pixel_values''': images} return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
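A minimal usage sketch; CLIPImageProcessor is assumed here as the shipped processor matching the preprocessing chain above (convert to RGB, resize shortest edge, center-crop, rescale, normalize):

import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor(size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224})
image = Image.fromarray(np.zeros((256, 320, 3), dtype=np.uint8))  # dummy RGB image
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)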
91
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable UpperCAmelCase_ : str = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Dict = ["""GPTNeoXTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : List[str] = [ """GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoXForCausalLM""", """GPTNeoXForQuestionAnswering""", """GPTNeoXForSequenceClassification""", """GPTNeoXForTokenClassification""", """GPTNeoXLayer""", """GPTNeoXModel""", """GPTNeoXPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
91
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def _A (__a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = SwinvaConfig() SCREAMING_SNAKE_CASE_ : Optional[Any] = swinva_name.split('''_''' ) SCREAMING_SNAKE_CASE_ : int = name_split[1] if "to" in name_split[3]: SCREAMING_SNAKE_CASE_ : str = int(name_split[3][-3:] ) else: SCREAMING_SNAKE_CASE_ : Any = int(name_split[3] ) if "to" in name_split[2]: SCREAMING_SNAKE_CASE_ : Tuple = int(name_split[2][-2:] ) else: SCREAMING_SNAKE_CASE_ : Dict = int(name_split[2][6:] ) if model_size == "tiny": SCREAMING_SNAKE_CASE_ : int = 96 SCREAMING_SNAKE_CASE_ : Dict = (2, 2, 6, 2) SCREAMING_SNAKE_CASE_ : Union[str, Any] = (3, 6, 12, 24) elif model_size == "small": SCREAMING_SNAKE_CASE_ : Union[str, Any] = 96 SCREAMING_SNAKE_CASE_ : Optional[int] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE_ : int = (3, 6, 12, 24) elif model_size == "base": SCREAMING_SNAKE_CASE_ : Optional[Any] = 1_28 SCREAMING_SNAKE_CASE_ : Optional[Any] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE_ : Any = (4, 8, 16, 32) else: SCREAMING_SNAKE_CASE_ : Optional[int] = 1_92 SCREAMING_SNAKE_CASE_ : Union[str, Any] = (2, 2, 18, 2) SCREAMING_SNAKE_CASE_ : Optional[Any] = (6, 12, 24, 48) if "to" in swinva_name: SCREAMING_SNAKE_CASE_ : Dict = (12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): SCREAMING_SNAKE_CASE_ : List[str] = 2_18_41 SCREAMING_SNAKE_CASE_ : List[str] = '''huggingface/label-files''' SCREAMING_SNAKE_CASE_ : Any = '''imagenet-22k-id2label.json''' SCREAMING_SNAKE_CASE_ : Optional[int] = json.load(open(hf_hub_download(__a , __a , repo_type='''dataset''' ) , '''r''' ) ) SCREAMING_SNAKE_CASE_ : int = {int(__a ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_ : str = idalabel SCREAMING_SNAKE_CASE_ : Tuple = {v: k for k, v in idalabel.items()} else: SCREAMING_SNAKE_CASE_ : Any = 10_00 SCREAMING_SNAKE_CASE_ : Tuple = '''huggingface/label-files''' SCREAMING_SNAKE_CASE_ : Any = '''imagenet-1k-id2label.json''' SCREAMING_SNAKE_CASE_ : List[Any] = json.load(open(hf_hub_download(__a , __a , repo_type='''dataset''' ) , '''r''' ) ) SCREAMING_SNAKE_CASE_ : List[str] = {int(__a ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_ : List[str] = idalabel SCREAMING_SNAKE_CASE_ : Union[str, Any] = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_ : Dict = img_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_classes SCREAMING_SNAKE_CASE_ : Union[str, Any] = embed_dim SCREAMING_SNAKE_CASE_ : str = depths SCREAMING_SNAKE_CASE_ : Optional[Any] = num_heads SCREAMING_SNAKE_CASE_ : str = window_size return config def _A (__a ) -> Any: """simple docstring""" if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE_ : Union[str, Any] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE_ : Dict = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: SCREAMING_SNAKE_CASE_ : Tuple = '''encoder.''' + name if "attn.proj" in name: SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''norm1''' , 
'''layernorm_before''' ) if "norm2" in name: SCREAMING_SNAKE_CASE_ : str = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE_ : Any = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''mlp.fc2''' , '''output.dense''' ) if "q_bias" in name: SCREAMING_SNAKE_CASE_ : Dict = name.replace('''q_bias''' , '''query.bias''' ) if "k_bias" in name: SCREAMING_SNAKE_CASE_ : Union[str, Any] = name.replace('''k_bias''' , '''key.bias''' ) if "v_bias" in name: SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''v_bias''' , '''value.bias''' ) if "cpb_mlp" in name: SCREAMING_SNAKE_CASE_ : Dict = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' ) if name == "norm.weight": SCREAMING_SNAKE_CASE_ : Optional[Any] = '''layernorm.weight''' if name == "norm.bias": SCREAMING_SNAKE_CASE_ : Tuple = '''layernorm.bias''' if "head" in name: SCREAMING_SNAKE_CASE_ : Any = name.replace('''head''' , '''classifier''' ) else: SCREAMING_SNAKE_CASE_ : str = '''swinv2.''' + name return name def _A (__a , __a ) -> List[Any]: """simple docstring""" for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE_ : Optional[Any] = orig_state_dict.pop(__a ) if "mask" in key: continue elif "qkv" in key: SCREAMING_SNAKE_CASE_ : Dict = key.split('''.''' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(key_split[1] ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(key_split[3] ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: SCREAMING_SNAKE_CASE_ : Optional[Any] = val[:dim, :] SCREAMING_SNAKE_CASE_ : List[str] = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE_ : Dict = val[-dim:, :] else: SCREAMING_SNAKE_CASE_ : Any = val[:dim] SCREAMING_SNAKE_CASE_ : List[str] = val[ dim : dim * 2 ] SCREAMING_SNAKE_CASE_ : List[str] = val[-dim:] else: SCREAMING_SNAKE_CASE_ : Dict = val return orig_state_dict def _A (__a , __a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = timm.create_model(__a , pretrained=__a ) timm_model.eval() SCREAMING_SNAKE_CASE_ : int = get_swinva_config(__a ) SCREAMING_SNAKE_CASE_ : Any = SwinvaForImageClassification(__a ) model.eval() SCREAMING_SNAKE_CASE_ : Any = convert_state_dict(timm_model.state_dict() , __a ) model.load_state_dict(__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' SCREAMING_SNAKE_CASE_ : List[Any] = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' , '''-''' ) ) ) SCREAMING_SNAKE_CASE_ : Tuple = Image.open(requests.get(__a , stream=__a ).raw ) SCREAMING_SNAKE_CASE_ : int = image_processor(images=__a , return_tensors='''pt''' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = timm_model(inputs['''pixel_values'''] ) SCREAMING_SNAKE_CASE_ : Tuple = model(**__a ).logits assert torch.allclose(__a , __a , atol=1e-3 ) print(f'Saving model {swinva_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(__a ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(__a ) model.push_to_hub( repo_path_or_name=Path(__a , __a ) , organization='''nandwalritik''' , commit_message='''Add model''' , ) if __name__ == "__main__": UpperCAmelCase_ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swinv2_name""", default="""swinv2_tiny_patch4_window8_256""", type=str, help="""Name of the Swinv2 timm model you'd like to 
convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) UpperCAmelCase_ : int = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
91
"""simple docstring""" import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py UpperCAmelCase_ : Optional[int] = """src/transformers""" UpperCAmelCase_ : Tuple = """docs/source/en""" UpperCAmelCase_ : Optional[Any] = """.""" def _A (__a , __a , __a ) -> Dict: """simple docstring""" with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Dict = f.readlines() # Find the start prompt. SCREAMING_SNAKE_CASE_ : List[Any] = 0 while not lines[start_index].startswith(__a ): start_index += 1 start_index += 1 SCREAMING_SNAKE_CASE_ : Tuple = start_index while not lines[end_index].startswith(__a ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | UpperCAmelCase_ : Optional[Any] = """Model|Encoder|Decoder|ForConditionalGeneration""" # Regexes that match TF/Flax/PT model names. UpperCAmelCase_ : int = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") UpperCAmelCase_ : Dict = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. UpperCAmelCase_ : int = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # This is to make sure the transformers module imported is the one in the repo. UpperCAmelCase_ : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH) def _A (__a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , __a ) return [m.group(0 ) for m in matches] def _A (__a , __a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = 2 if text == '''✅''' or text == '''❌''' else len(__a ) SCREAMING_SNAKE_CASE_ : Tuple = (width - text_length) // 2 SCREAMING_SNAKE_CASE_ : Tuple = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def _A () -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES SCREAMING_SNAKE_CASE_ : Tuple = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } SCREAMING_SNAKE_CASE_ : List[Any] = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. SCREAMING_SNAKE_CASE_ : List[str] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : int = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : List[str] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : int = collections.defaultdict(__a ) # Let's lookup through all transformers object (once). 
for attr_name in dir(__a ): SCREAMING_SNAKE_CASE_ : Any = None if attr_name.endswith('''Tokenizer''' ): SCREAMING_SNAKE_CASE_ : Dict = slow_tokenizers SCREAMING_SNAKE_CASE_ : Dict = attr_name[:-9] elif attr_name.endswith('''TokenizerFast''' ): SCREAMING_SNAKE_CASE_ : Optional[Any] = fast_tokenizers SCREAMING_SNAKE_CASE_ : Optional[Any] = attr_name[:-13] elif _re_tf_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : int = tf_models SCREAMING_SNAKE_CASE_ : Dict = _re_tf_models.match(__a ).groups()[0] elif _re_flax_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : Any = flax_models SCREAMING_SNAKE_CASE_ : Tuple = _re_flax_models.match(__a ).groups()[0] elif _re_pt_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : str = pt_models SCREAMING_SNAKE_CASE_ : int = _re_pt_models.match(__a ).groups()[0] if lookup_dict is not None: while len(__a ) > 0: if attr_name in model_name_to_prefix.values(): SCREAMING_SNAKE_CASE_ : List[str] = True break # Try again after removing the last word in the name SCREAMING_SNAKE_CASE_ : Optional[int] = ''''''.join(camel_case_split(__a )[:-1] ) # Let's build that table! SCREAMING_SNAKE_CASE_ : Any = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) SCREAMING_SNAKE_CASE_ : Any = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support'''] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). SCREAMING_SNAKE_CASE_ : List[str] = [len(__a ) + 2 for c in columns] SCREAMING_SNAKE_CASE_ : str = max([len(__a ) for name in model_names] ) + 2 # Build the table per se SCREAMING_SNAKE_CASE_ : List[Any] = '''|''' + '''|'''.join([_center_text(__a , __a ) for c, w in zip(__a , __a )] ) + '''|\n''' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n" SCREAMING_SNAKE_CASE_ : Union[str, Any] = {True: '''✅''', False: '''❌'''} for name in model_names: SCREAMING_SNAKE_CASE_ : str = model_name_to_prefix[name] SCREAMING_SNAKE_CASE_ : int = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(__a , __a ) for l, w in zip(__a , __a )] ) + "|\n" return table def _A (__a=False ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = _find_text_in_file( filename=os.path.join(__a , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , ) SCREAMING_SNAKE_CASE_ : Tuple = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(__a , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' ) if __name__ == "__main__": UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCAmelCase_ : Any = parser.parse_args() check_model_table(args.fix_and_overwrite)
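For illustration, a re-implementation of the camel-case splitter the table builder above relies on when stripping trailing words from class names:

import re

_re_words = re.compile(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)")

def camel_case_split(identifier: str):
    # Split on lower->upper and acronym->word boundaries, keeping each piece.
    return [m.group(0) for m in _re_words.finditer(identifier)]

print(camel_case_split("GPTNeoXForCausalLM"))  # ['GPT', 'Neo', 'X', 'For', 'Causal', 'LM']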
91
1
"""simple docstring""" import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) UpperCAmelCase_ : List[str] = logging.getLogger(__name__) def _A () -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = argparse.ArgumentParser( description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' ) parser.add_argument('''--file_path''' , type=__a , default='''data/dump.txt''' , help='''The path to the data.''' ) parser.add_argument('''--tokenizer_type''' , type=__a , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] ) parser.add_argument('''--tokenizer_name''' , type=__a , default='''bert-base-uncased''' , help='''The tokenizer to use.''' ) parser.add_argument('''--dump_file''' , type=__a , default='''data/dump''' , help='''The dump file prefix.''' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = parser.parse_args() logger.info(f'Loading Tokenizer ({args.tokenizer_name})' ) if args.tokenizer_type == "bert": SCREAMING_SNAKE_CASE_ : Dict = BertTokenizer.from_pretrained(args.tokenizer_name ) SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]` SCREAMING_SNAKE_CASE_ : str = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]` elif args.tokenizer_type == "roberta": SCREAMING_SNAKE_CASE_ : List[str] = RobertaTokenizer.from_pretrained(args.tokenizer_name ) SCREAMING_SNAKE_CASE_ : int = tokenizer.special_tokens_map['''cls_token'''] # `<s>` SCREAMING_SNAKE_CASE_ : Dict = tokenizer.special_tokens_map['''sep_token'''] # `</s>` elif args.tokenizer_type == "gpt2": SCREAMING_SNAKE_CASE_ : Any = GPTaTokenizer.from_pretrained(args.tokenizer_name ) SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>` SCREAMING_SNAKE_CASE_ : str = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>` logger.info(f'Loading text from {args.file_path}' ) with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp: SCREAMING_SNAKE_CASE_ : Any = fp.readlines() logger.info('''Start encoding''' ) logger.info(f'{len(__a )} examples to process.' ) SCREAMING_SNAKE_CASE_ : int = [] SCREAMING_SNAKE_CASE_ : int = 0 SCREAMING_SNAKE_CASE_ : Optional[int] = 1_00_00 SCREAMING_SNAKE_CASE_ : Optional[Any] = time.time() for text in data: SCREAMING_SNAKE_CASE_ : Union[str, Any] = f'{bos} {text.strip()} {sep}' SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.encode(__a , add_special_tokens=__a ) rslt.append(__a ) iter += 1 if iter % interval == 0: SCREAMING_SNAKE_CASE_ : Tuple = time.time() logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' ) SCREAMING_SNAKE_CASE_ : Tuple = time.time() logger.info('''Finished binarization''' ) logger.info(f'{len(__a )} examples processed.' ) SCREAMING_SNAKE_CASE_ : Any = f'{args.dump_file}.{args.tokenizer_name}.pickle' SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.vocab_size if vocab_size < (1 << 16): SCREAMING_SNAKE_CASE_ : Any = [np.uintaa(__a ) for d in rslt] else: SCREAMING_SNAKE_CASE_ : List[str] = [np.intaa(__a ) for d in rslt] random.shuffle(rslt_ ) logger.info(f'Dump to {dp_file}' ) with open(__a , '''wb''' ) as handle: pickle.dump(rslt_ , __a , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
91
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , lowercase_ : List[Any] , lowercase_ : List[str]=13 , lowercase_ : int=7 , lowercase_ : Any=True , lowercase_ : str=True , lowercase_ : List[Any]=True , lowercase_ : List[Any]=True , lowercase_ : Dict=99 , lowercase_ : Union[str, Any]=24 , lowercase_ : int=2 , lowercase_ : List[str]=6 , lowercase_ : Any=37 , lowercase_ : Dict="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Dict=0.1 , lowercase_ : Union[str, Any]=512 , lowercase_ : List[str]=16 , lowercase_ : Any=2 , lowercase_ : Any=0.02 , lowercase_ : List[Any]=3 , lowercase_ : Optional[int]=None , lowercase_ : str=1000 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = parent SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_size SCREAMING_SNAKE_CASE_ : Optional[Any] = seq_length SCREAMING_SNAKE_CASE_ : List[Any] = is_training SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask SCREAMING_SNAKE_CASE_ : Optional[Any] = use_token_type_ids SCREAMING_SNAKE_CASE_ : int = use_labels SCREAMING_SNAKE_CASE_ : List[Any] = vocab_size SCREAMING_SNAKE_CASE_ : List[str] = hidden_size SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE_ : List[str] = num_attention_heads SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size SCREAMING_SNAKE_CASE_ : Tuple = hidden_act SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE_ : Union[str, Any] = type_vocab_size SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size SCREAMING_SNAKE_CASE_ : Any = initializer_range SCREAMING_SNAKE_CASE_ : Optional[Any] = num_labels SCREAMING_SNAKE_CASE_ : Tuple = scope SCREAMING_SNAKE_CASE_ : Optional[int] = range_bbox def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 3] SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 1] SCREAMING_SNAKE_CASE_ : str = t if bbox[i, j, 2] < bbox[i, j, 0]: SCREAMING_SNAKE_CASE_ : List[str] = bbox[i, j, 2] SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 0] SCREAMING_SNAKE_CASE_ : List[str] = t SCREAMING_SNAKE_CASE_ : Tuple = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) SCREAMING_SNAKE_CASE_ : Union[str, Any] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) SCREAMING_SNAKE_CASE_ : List[str] = None 
SCREAMING_SNAKE_CASE_ : List[str] = None if self.use_labels: SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE_ : Any = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = LiltModel(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , bbox=lowercase_ , token_type_ids=lowercase_) SCREAMING_SNAKE_CASE_ : int = model(lowercase_ , bbox=lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE_ : Optional[Any] = LiltForTokenClassification(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Tuple = model( lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : str , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = LiltForQuestionAnswering(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Optional[int] = model( lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) : List[str] = config_and_inputs 
SCREAMING_SNAKE_CASE_ : str = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __UpperCamelCase = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : str): '''simple docstring''' return True def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = LiltModelTester(self) SCREAMING_SNAKE_CASE_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE_ : Dict = type self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase_) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Optional[int] = LiltModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) @require_torch @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''').to(lowercase_) SCREAMING_SNAKE_CASE_ : str = torch.tensor([[1, 2]] , device=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase_) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Dict = model(input_ids=lowercase_ , bbox=lowercase_) SCREAMING_SNAKE_CASE_ : str = torch.Size([1, 2, 768]) SCREAMING_SNAKE_CASE_ : Dict = torch.tensor( [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=lowercase_ , ) self.assertTrue(outputs.last_hidden_state.shape , lowercase_) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase_ , atol=1e-3))
91
1
"""simple docstring""" import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def _A (__a , __a ) -> List[Any]: """simple docstring""" if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer SCREAMING_SNAKE_CASE_ : List[str] = flax_key_tuple[:-1] + ('''weight''',) SCREAMING_SNAKE_CASE_ : Dict = torch.permute(__a , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__a ): # linear layer SCREAMING_SNAKE_CASE_ : Tuple = flax_key_tuple[:-1] + ('''weight''',) SCREAMING_SNAKE_CASE_ : int = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: SCREAMING_SNAKE_CASE_ : Union[str, Any] = flax_key_tuple[:-1] + ('''weight''',) return flax_key_tuple, flax_tensor def _A (__a , __a , __a ) -> Union[str, Any]: """simple docstring""" if "metadata" in layer: SCREAMING_SNAKE_CASE_ : Tuple = layer.split('''metadata''' ) SCREAMING_SNAKE_CASE_ : Optional[int] = ''''''.join(split_layer[0] )[:-1] SCREAMING_SNAKE_CASE_ : List[Any] = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )] elif "kvstore" in layer: SCREAMING_SNAKE_CASE_ : Any = layer.split('''kvstore''' ) SCREAMING_SNAKE_CASE_ : Tuple = ''''''.join(split_layer[0] )[:-1] SCREAMING_SNAKE_CASE_ : Optional[Any] = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )] else: SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer.split('''/''' ) SCREAMING_SNAKE_CASE_ : Optional[int] = '''/'''.join(split_layer[:-1] ) SCREAMING_SNAKE_CASE_ : str = (split_layer[-1],) if "kvstore/path" in layer: SCREAMING_SNAKE_CASE_ : Optional[int] = f'{switch_checkpoint_path}/{checkpoint_info[layer]}' elif "kvstore/driver" in layer: SCREAMING_SNAKE_CASE_ : Optional[Any] = '''file''' else: SCREAMING_SNAKE_CASE_ : int = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def _A (__a , __a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = rename_keys(__a ) SCREAMING_SNAKE_CASE_ : List[str] = {} for k, v in current_block.items(): SCREAMING_SNAKE_CASE_ : Optional[Any] = v SCREAMING_SNAKE_CASE_ : Optional[Any] = new_current_block torch.save(__a , __a ) def _A (__a , __a , __a , __a , __a = WEIGHTS_NAME ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = convert_file_size_to_int(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = [] SCREAMING_SNAKE_CASE_ : Any = {} SCREAMING_SNAKE_CASE_ : Any = 0 SCREAMING_SNAKE_CASE_ : List[Any] = 0 os.makedirs(__a , exist_ok=__a ) with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp: SCREAMING_SNAKE_CASE_ : Dict = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target'''] SCREAMING_SNAKE_CASE_ : Optional[Any] = flatten_dict(__a , sep='''/''' ) SCREAMING_SNAKE_CASE_ : Tuple = {} for layer in checkpoint_info.keys(): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = get_key_and_tensorstore_dict( __a , __a , __a ) if curr_real_layer_name in all_layers: SCREAMING_SNAKE_CASE_ : Any = content else: SCREAMING_SNAKE_CASE_ : int = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file SCREAMING_SNAKE_CASE_ : 
List[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(__a ) SCREAMING_SNAKE_CASE_ : List[Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = rename_base_flax_keys(tuple(key.split('''/''' ) ) , __a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = '''/'''.join(__a ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join( __a , weights_name.replace('''.bin''' , f'-{len(__a )+1:05d}-of-???.bin' ) ) rename_and_save_block(__a , __a ) sharded_state_dicts.append(current_block.keys() ) del current_block SCREAMING_SNAKE_CASE_ : Optional[Any] = {} SCREAMING_SNAKE_CASE_ : str = 0 SCREAMING_SNAKE_CASE_ : str = raw_weights.to(getattr(__a , __a ) ) current_block_size += weight_size total_size += weight_size # Add the last block SCREAMING_SNAKE_CASE_ : Dict = os.path.join(__a , weights_name.replace('''.bin''' , f'-{len(__a )+1:05d}-of-???.bin' ) ) rename_and_save_block(__a , __a ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(__a ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index SCREAMING_SNAKE_CASE_ : Optional[int] = {} SCREAMING_SNAKE_CASE_ : Union[str, Any] = {} for idx, shard in enumerate(__a ): SCREAMING_SNAKE_CASE_ : List[str] = weights_name.replace( '''.bin''' , f'-{idx+1:05d}-of-{len(__a ):05d}.bin' ) # len(sharded_state_dicts):05d} SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(__a , weights_name.replace('''.bin''' , f'-{idx+1:05d}-of-???.bin' ) ) os.rename(__a , os.path.join(__a , __a ) ) SCREAMING_SNAKE_CASE_ : Optional[int] = shard for key in shard: SCREAMING_SNAKE_CASE_ : Optional[Any] = shard_file # Add the metadata SCREAMING_SNAKE_CASE_ : Dict = {'''total_size''': total_size} SCREAMING_SNAKE_CASE_ : List[Any] = {'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(__a , __a ) , '''w''' , encoding='''utf-8''' ) as f: SCREAMING_SNAKE_CASE_ : Union[str, Any] = json.dumps(__a , indent=2 , sort_keys=__a ) + '''\n''' f.write(__a ) return metadata, index if __name__ == "__main__": UpperCAmelCase_ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--switch_t5x_checkpoint_path""", default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""", type=str, required=False, help="""Path to a directory containing a folder per layer. 
Follows the original Google format.""", ) parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""") parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""", type=str, required=False, help="""Path to the output pytorch model.""", ) UpperCAmelCase_ : Any = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def _A () -> Tuple: """simple docstring""" from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer SCREAMING_SNAKE_CASE_ : Optional[Any] = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' ) config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' ) SCREAMING_SNAKE_CASE_ : Optional[int] = SwitchTransformersForConditionalGeneration.from_pretrained( '''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' ) SCREAMING_SNAKE_CASE_ : str = TaTokenizer.from_pretrained('''t5-small''' ) SCREAMING_SNAKE_CASE_ : str = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''' SCREAMING_SNAKE_CASE_ : List[str] = tokenizer(__a , return_tensors='''pt''' ).input_ids SCREAMING_SNAKE_CASE_ : int = model.generate(__a , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
91
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) UpperCAmelCase_ : Dict = logging.getLogger(__name__) if __name__ == "__main__": UpperCAmelCase_ : List[str] = argparse.ArgumentParser( description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)""" ) parser.add_argument( """--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset.""" ) parser.add_argument( """--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file.""" ) parser.add_argument("""--vocab_size""", default=30522, type=int) UpperCAmelCase_ : Optional[Any] = parser.parse_args() logger.info(f'''Loading data from {args.data_file}''') with open(args.data_file, """rb""") as fp: UpperCAmelCase_ : Union[str, Any] = pickle.load(fp) logger.info("""Counting occurrences for MLM.""") UpperCAmelCase_ : Any = Counter() for tk_ids in data: counter.update(tk_ids) UpperCAmelCase_ : List[Any] = [0] * args.vocab_size for k, v in counter.items(): UpperCAmelCase_ : Dict = v logger.info(f'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, """wb""") as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
91
1
"""simple docstring""" import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def _A (__a="" ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = tempfile.mkdtemp() return os.path.join(__a , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.rand(12 , dtype=torch.floataa) - 0.5 SCREAMING_SNAKE_CASE_ : Optional[Any] = AgentAudio(lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = str(agent_type.to_string()) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1e-4)) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(lowercase_)) # Ensure that the file contains the same value as the original tensor SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = sf.read(lowercase_) self.assertTrue(torch.allclose(lowercase_ , torch.tensor(lowercase_) , atol=1e-4)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = torch.rand(12 , dtype=torch.floataa) - 0.5 SCREAMING_SNAKE_CASE_ : int = get_new_path(suffix='''.wav''') sf.write(lowercase_ , lowercase_ , 16000) SCREAMING_SNAKE_CASE_ : int = AgentAudio(lowercase_) self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1e-4)) self.assertEqual(agent_type.to_string() , lowercase_) @require_vision @require_torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = torch.randint(0 , 256 , (64, 64, 3)) SCREAMING_SNAKE_CASE_ : Dict = AgentImage(lowercase_) SCREAMING_SNAKE_CASE_ : int = str(agent_type.to_string()) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowercase_ , agent_type._tensor , atol=1e-4)) self.assertIsInstance(agent_type.to_raw() , Image.Image) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = Path(get_tests_dir('''fixtures/tests_samples/COCO''')) / '''000000039769.png''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = Image.open(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = AgentImage(lowercase_) self.assertTrue(path.samefile(agent_type.to_string())) self.assertTrue(image == agent_type.to_raw()) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = Path(get_tests_dir('''fixtures/tests_samples/COCO''')) / '''000000039769.png''' SCREAMING_SNAKE_CASE_ : List[str] = Image.open(lowercase_) SCREAMING_SNAKE_CASE_ : int = AgentImage(lowercase_) self.assertFalse(path.samefile(agent_type.to_string())) self.assertTrue(image == agent_type.to_raw()) # 
Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowercase_)) class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = '''Hey!''' SCREAMING_SNAKE_CASE_ : int = AgentText(lowercase_) self.assertEqual(lowercase_ , agent_type.to_string()) self.assertEqual(lowercase_ , agent_type.to_raw()) self.assertEqual(lowercase_ , lowercase_)
91
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) def _A (__a , __a ) -> Tuple: """simple docstring""" try: with open(__a , '''rb''' ) as flax_state_f: SCREAMING_SNAKE_CASE_ : Optional[int] = from_bytes(__a , flax_state_f.read() ) except UnpicklingError as e: try: with open(__a ) as f: if f.read().startswith('''version''' ): raise OSError( '''You seem to have cloned a repository without having git-lfs installed. Please''' ''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the''' ''' folder you cloned.''' ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(__a , __a ) def _A (__a , __a ) -> Tuple: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights SCREAMING_SNAKE_CASE_ : Optional[int] = flatten_dict(jax.tree_util.tree_map(lambda __a : x.dtype == jnp.bfloataa , __a ) ).values() if any(__a ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = jax.tree_util.tree_map( lambda __a : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __a ) SCREAMING_SNAKE_CASE_ : int = '''''' SCREAMING_SNAKE_CASE_ : str = flatten_dict(__a , sep='''.''' ) SCREAMING_SNAKE_CASE_ : List[Any] = pt_model.state_dict() # keep track of unexpected & missing keys SCREAMING_SNAKE_CASE_ : str = [] SCREAMING_SNAKE_CASE_ : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): SCREAMING_SNAKE_CASE_ : Any = flax_key_tuple.split('''.''' ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: SCREAMING_SNAKE_CASE_ : Any = flax_key_tuple_array[:-1] + ['''weight'''] SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.transpose(__a , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": SCREAMING_SNAKE_CASE_ : Tuple = flax_key_tuple_array[:-1] + ['''weight'''] SCREAMING_SNAKE_CASE_ : Optional[int] = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": SCREAMING_SNAKE_CASE_ : Optional[int] = flax_key_tuple_array[:-1] + ['''weight'''] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(__a ): SCREAMING_SNAKE_CASE_ : List[str] = ( flax_key_tuple_string.replace('''_0''' , '''.0''' ) .replace('''_1''' , '''.1''' ) .replace('''_2''' , '''.2''' ) .replace('''_3''' , '''.3''' ) .replace('''_4''' , '''.4''' ) .replace('''_5''' , '''.5''' ) .replace('''_6''' , '''.6''' ) .replace('''_7''' , '''.7''' ) .replace('''_8''' , '''.8''' ) .replace('''_9''' , '''.9''' ) ) SCREAMING_SNAKE_CASE_ : Optional[Any] = '''.'''.join(__a ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f'Flax checkpoint seems to be 
incorrect. Weight {flax_key_tuple} was expected ' f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict SCREAMING_SNAKE_CASE_ : Optional[int] = np.asarray(__a ) if not isinstance(__a , np.ndarray ) else flax_tensor SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.from_numpy(__a ) # remove from missing keys missing_keys.remove(__a ) else: # weight is not expected by PyTorch model unexpected_keys.append(__a ) pt_model.load_state_dict(__a ) # re-transform missing_keys to list SCREAMING_SNAKE_CASE_ : int = list(__a ) if len(__a ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) if len(__a ) > 0: logger.warning( f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) return pt_model
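In practice this Flax-to-PyTorch weight loading is usually reached through from_pretrained(..., from_flax=True) rather than by calling the helper directly. A minimal sketch, assuming a local directory that contains a Flax checkpoint (flax_model.msgpack plus config):

from transformers import BertModel

# Hypothetical paths; any architecture with both Flax and PyTorch classes works the same way.
pt_model = BertModel.from_pretrained("/path/to/flax-bert-checkpoint", from_flax=True)
pt_model.save_pretrained("/path/to/pytorch-bert-checkpoint")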
91
1
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "WhisperFeatureExtractor" __UpperCamelCase = "WhisperTokenizer" def __init__( self : List[str] , lowercase_ : List[Any] , lowercase_ : Dict): '''simple docstring''' super().__init__(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = self.feature_extractor SCREAMING_SNAKE_CASE_ : Dict = False def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str=None , lowercase_ : Optional[Any]=None , lowercase_ : Any=True): '''simple docstring''' return self.tokenizer.get_decoder_prompt_ids(task=lowercase_ , language=lowercase_ , no_timestamps=lowercase_) def __call__( self : Dict , *lowercase_ : Dict , **lowercase_ : Union[str, Any]): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('''audio''' , lowercase_) SCREAMING_SNAKE_CASE_ : str = kwargs.pop('''sampling_rate''' , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('''text''' , lowercase_) if len(lowercase_) > 0: SCREAMING_SNAKE_CASE_ : Optional[int] = args[0] SCREAMING_SNAKE_CASE_ : Tuple = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''') if audio is not None: SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_) if text is not None: SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer(lowercase_ , **lowercase_) if text is None: return inputs elif audio is None: return encodings else: SCREAMING_SNAKE_CASE_ : Union[str, Any] = encodings['''input_ids'''] return inputs def _SCREAMING_SNAKE_CASE ( self : List[str] , *lowercase_ : Optional[Any] , **lowercase_ : Tuple): '''simple docstring''' return self.tokenizer.batch_decode(*lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *lowercase_ : Dict , **lowercase_ : Optional[Any]): '''simple docstring''' return self.tokenizer.decode(*lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : str , lowercase_ : Optional[Any]="np"): '''simple docstring''' return self.tokenizer.get_prompt_ids(lowercase_ , return_tensors=lowercase_)
91
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase_ : Any = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "openai-gpt" __UpperCamelCase = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : List[str] , lowercase_ : List[str]=40478 , lowercase_ : List[str]=512 , lowercase_ : Optional[Any]=768 , lowercase_ : Tuple=12 , lowercase_ : Tuple=12 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : List[Any]=1e-5 , lowercase_ : int=0.02 , lowercase_ : Optional[int]="cls_index" , lowercase_ : Any=True , lowercase_ : List[Any]=None , lowercase_ : List[str]=True , lowercase_ : Optional[Any]=0.1 , **lowercase_ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = vocab_size SCREAMING_SNAKE_CASE_ : Tuple = n_positions SCREAMING_SNAKE_CASE_ : Optional[int] = n_embd SCREAMING_SNAKE_CASE_ : Dict = n_layer SCREAMING_SNAKE_CASE_ : Any = n_head SCREAMING_SNAKE_CASE_ : Union[str, Any] = afn SCREAMING_SNAKE_CASE_ : int = resid_pdrop SCREAMING_SNAKE_CASE_ : List[str] = embd_pdrop SCREAMING_SNAKE_CASE_ : Union[str, Any] = attn_pdrop SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_epsilon SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range SCREAMING_SNAKE_CASE_ : List[str] = summary_type SCREAMING_SNAKE_CASE_ : Tuple = summary_use_proj SCREAMING_SNAKE_CASE_ : Union[str, Any] = summary_activation SCREAMING_SNAKE_CASE_ : Any = summary_first_dropout SCREAMING_SNAKE_CASE_ : List[str] = summary_proj_to_labels super().__init__(**lowercase_)
91
1
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : Optional[Any] = {"""tokenizer_file""": """tokenizer.json"""} UpperCAmelCase_ : Any = { """tokenizer_file""": { """bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""", """bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""", """bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""", """bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""", """bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""", """bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""", """bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""", }, } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = ["input_ids", "attention_mask"] __UpperCamelCase = None def __init__( self : Optional[Any] , lowercase_ : Optional[Any]=None , lowercase_ : List[Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Any="<unk>" , lowercase_ : Any="<s>" , lowercase_ : int="</s>" , lowercase_ : str="<pad>" , lowercase_ : Optional[Any]=False , lowercase_ : Optional[Any]=False , **lowercase_ : Optional[Any] , ): '''simple docstring''' super().__init__( lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , add_prefix_space=lowercase_ , clean_up_tokenization_spaces=lowercase_ , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('''add_prefix_space''' , lowercase_) != add_prefix_space: SCREAMING_SNAKE_CASE_ : int = getattr(lowercase_ , pre_tok_state.pop('''type''')) SCREAMING_SNAKE_CASE_ : int = add_prefix_space SCREAMING_SNAKE_CASE_ : Any = pre_tok_class(**lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = add_prefix_space def _SCREAMING_SNAKE_CASE ( self : List[Any] , *lowercase_ : List[Any] , **lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.get('''is_split_into_words''' , lowercase_) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with' ''' pretokenized inputs.''') return super()._batch_encode_plus(*lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.get('''is_split_into_words''' , lowercase_) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with' ''' pretokenized inputs.''') return super()._encode_plus(*lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : 
Optional[str] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self._tokenizer.model.save(lowercase_ , name=lowercase_) return tuple(lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : "Conversation"): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_) + [self.eos_token_id]) if len(lowercase_) > self.model_max_length: SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids[-self.model_max_length :] return input_ids
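Typical usage of the fast Bloom tokenizer, with the checkpoint name taken from the pretrained map above; downloading the tokenizer files is required, so this is a sketch rather than an offline test.

from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
encoded = tokenizer("Hello world", return_tensors="pt")
print(encoded.input_ids, tokenizer.decode(encoded.input_ids[0]))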
91
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[str] , *lowercase_ : Dict , **lowercase_ : Union[str, Any]): '''simple docstring''' warnings.warn( '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DeiTImageProcessor instead.''' , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_)
91
1
"""simple docstring""" import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging UpperCAmelCase_ : List[str] = logging.get_logger(__name__) UpperCAmelCase_ : Any = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "bart" __UpperCamelCase = ["past_key_values"] __UpperCamelCase = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : int , lowercase_ : Any=50265 , lowercase_ : List[str]=1024 , lowercase_ : List[Any]=12 , lowercase_ : str=4096 , lowercase_ : int=16 , lowercase_ : Optional[Any]=12 , lowercase_ : List[str]=4096 , lowercase_ : Optional[Any]=16 , lowercase_ : Optional[int]=0.0 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : Optional[Any]="gelu" , lowercase_ : int=1024 , lowercase_ : Any=0.1 , lowercase_ : Dict=0.0 , lowercase_ : Any=0.0 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Tuple=0.0 , lowercase_ : Tuple=False , lowercase_ : str=True , lowercase_ : Optional[int]=3 , lowercase_ : Optional[int]=1 , lowercase_ : Dict=0 , lowercase_ : Tuple=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Dict=2 , lowercase_ : Dict=2 , **lowercase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = vocab_size SCREAMING_SNAKE_CASE_ : Tuple = max_position_embeddings SCREAMING_SNAKE_CASE_ : List[Any] = d_model SCREAMING_SNAKE_CASE_ : int = encoder_ffn_dim SCREAMING_SNAKE_CASE_ : Any = encoder_layers SCREAMING_SNAKE_CASE_ : Tuple = encoder_attention_heads SCREAMING_SNAKE_CASE_ : str = decoder_ffn_dim SCREAMING_SNAKE_CASE_ : str = decoder_layers SCREAMING_SNAKE_CASE_ : int = decoder_attention_heads SCREAMING_SNAKE_CASE_ : Any = dropout SCREAMING_SNAKE_CASE_ : Union[str, Any] = attention_dropout SCREAMING_SNAKE_CASE_ : int = activation_dropout SCREAMING_SNAKE_CASE_ : Optional[int] = activation_function SCREAMING_SNAKE_CASE_ : List[str] = init_std SCREAMING_SNAKE_CASE_ : Optional[Any] = encoder_layerdrop SCREAMING_SNAKE_CASE_ : Optional[int] = decoder_layerdrop SCREAMING_SNAKE_CASE_ : Tuple = classifier_dropout SCREAMING_SNAKE_CASE_ : str = use_cache SCREAMING_SNAKE_CASE_ : Dict = encoder_layers SCREAMING_SNAKE_CASE_ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , lowercase_): SCREAMING_SNAKE_CASE_ : str = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. 
' '''The config can simply be saved and uploaded again to be fixed.''') class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE_ : Dict = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ]) if self.use_past: SCREAMING_SNAKE_CASE_ : List[Any] = {0: '''batch'''} SCREAMING_SNAKE_CASE_ : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: SCREAMING_SNAKE_CASE_ : Tuple = {0: '''batch''', 1: '''decoder_sequence'''} SCREAMING_SNAKE_CASE_ : Any = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(lowercase_ , direction='''inputs''') elif self.task == "causal-lm": # TODO: figure this case out. SCREAMING_SNAKE_CASE_ : str = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ]) if self.use_past: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_layers for i in range(lowercase_): SCREAMING_SNAKE_CASE_ : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''} SCREAMING_SNAKE_CASE_ : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} else: SCREAMING_SNAKE_CASE_ : List[Any] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ]) return common_inputs @property def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE_ : List[Any] = super().outputs else: SCREAMING_SNAKE_CASE_ : List[str] = super(lowercase_ , self).outputs if self.use_past: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_layers for i in range(lowercase_): SCREAMING_SNAKE_CASE_ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} SCREAMING_SNAKE_CASE_ : Optional[int] = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) # Generate decoder inputs SCREAMING_SNAKE_CASE_ : Union[str, Any] = seq_length if not self.use_past else 1 SCREAMING_SNAKE_CASE_ : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} SCREAMING_SNAKE_CASE_ : Dict = dict(**lowercase_ , **lowercase_) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''') else: import torch SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = common_inputs['''input_ids'''].shape SCREAMING_SNAKE_CASE_ : Any = common_inputs['''decoder_input_ids'''].shape[1] 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.num_attention_heads SCREAMING_SNAKE_CASE_ : Union[str, Any] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) SCREAMING_SNAKE_CASE_ : Optional[Any] = decoder_seq_length + 3 SCREAMING_SNAKE_CASE_ : Tuple = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) SCREAMING_SNAKE_CASE_ : str = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(lowercase_ , lowercase_)] , dim=1) SCREAMING_SNAKE_CASE_ : Optional[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.num_layers SCREAMING_SNAKE_CASE_ : Union[str, Any] = min(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = max(lowercase_ , lowercase_) - min_num_layers SCREAMING_SNAKE_CASE_ : List[Any] = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(lowercase_): common_inputs["past_key_values"].append( ( torch.zeros(lowercase_), torch.zeros(lowercase_), torch.zeros(lowercase_), torch.zeros(lowercase_), )) # TODO: test this. SCREAMING_SNAKE_CASE_ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(lowercase_ , lowercase_): common_inputs["past_key_values"].append((torch.zeros(lowercase_), torch.zeros(lowercase_))) return common_inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''') else: import torch SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values SCREAMING_SNAKE_CASE_ : Tuple = seqlen + 2 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.num_layers SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.num_attention_heads SCREAMING_SNAKE_CASE_ : Union[str, Any] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) SCREAMING_SNAKE_CASE_ : Optional[int] = common_inputs['''attention_mask'''].dtype SCREAMING_SNAKE_CASE_ : str = torch.cat( [common_inputs['''attention_mask'''], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_)] , dim=1) SCREAMING_SNAKE_CASE_ : str = [ (torch.zeros(lowercase_), torch.zeros(lowercase_)) for _ in range(lowercase_) ] return common_inputs def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.num_special_tokens_to_add(lowercase_) SCREAMING_SNAKE_CASE_ : str = 
compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_) # Generate dummy inputs according to compute batch and sequence SCREAMING_SNAKE_CASE_ : Tuple = [''' '''.join([tokenizer.unk_token]) * seq_length] * batch_size SCREAMING_SNAKE_CASE_ : Dict = dict(tokenizer(lowercase_ , return_tensors=lowercase_)) return common_inputs def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_) elif self.task == "causal-lm": SCREAMING_SNAKE_CASE_ : Optional[int] = self._generate_dummy_inputs_for_causal_lm( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_) else: SCREAMING_SNAKE_CASE_ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_) return common_inputs def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : str , lowercase_ : int , lowercase_ : Tuple , lowercase_ : str): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE_ : Dict = super()._flatten_past_key_values_(lowercase_ , lowercase_ , lowercase_ , lowercase_) else: SCREAMING_SNAKE_CASE_ : Tuple = super(lowercase_ , self)._flatten_past_key_values_( lowercase_ , lowercase_ , lowercase_ , lowercase_)
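A small BartConfig can be built directly to see the attribute_map in action: num_attention_heads resolves to encoder_attention_heads. The dimensions below are arbitrary toy values.

from transformers import BartConfig, BartModel

config = BartConfig(
    d_model=256,
    encoder_layers=2, decoder_layers=2,
    encoder_attention_heads=4, decoder_attention_heads=4,
    encoder_ffn_dim=512, decoder_ffn_dim=512,
)
model = BartModel(config)
print(model.config.num_attention_heads)  # 4, via the attribute_map alias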
91
"""simple docstring""" import random from typing import Any def _A (__a ) -> list[Any]: """simple docstring""" for _ in range(len(__a ) ): SCREAMING_SNAKE_CASE_ : Optional[int] = random.randint(0 , len(__a ) - 1 ) SCREAMING_SNAKE_CASE_ : Tuple = random.randint(0 , len(__a ) - 1 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = data[b], data[a] return data if __name__ == "__main__": UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5, 6, 7] UpperCAmelCase_ : Dict = ["""python""", """says""", """hello""", """!"""] print("""Fisher-Yates Shuffle:""") print("""List""", integers, strings) print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
91
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "philschmid/bart-large-cnn-samsum" __UpperCamelCase = ( "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, " "and returns a summary of the text." ) __UpperCamelCase = "summarizer" __UpperCamelCase = AutoTokenizer __UpperCamelCase = AutoModelForSeqaSeqLM __UpperCamelCase = ["text"] __UpperCamelCase = ["text"] def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Any): '''simple docstring''' return self.pre_processor(lowercase_ , return_tensors='''pt''' , truncation=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[str]): '''simple docstring''' return self.model.generate(**lowercase_)[0] def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Optional[int]): '''simple docstring''' return self.pre_processor.decode(lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_)
91
"""simple docstring""" import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def _A (__a , __a , __a ) -> Dict: """simple docstring""" if gpta_config_file == "": SCREAMING_SNAKE_CASE_ : Optional[Any] = GPTaConfig() else: SCREAMING_SNAKE_CASE_ : Tuple = GPTaConfig.from_json_file(__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = GPTaModel(__a ) # Load weights from numpy load_tf_weights_in_gpta(__a , __a , __a ) # Save pytorch-model SCREAMING_SNAKE_CASE_ : List[str] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME SCREAMING_SNAKE_CASE_ : List[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(f'Save PyTorch model to {pytorch_weights_dump_path}' ) torch.save(model.state_dict() , __a ) print(f'Save configuration file to {pytorch_config_dump_path}' ) with open(__a , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCAmelCase_ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--gpt2_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained OpenAI model. \n""" """This specifies the model architecture.""" ), ) UpperCAmelCase_ : Union[str, Any] = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
91
1
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) UpperCAmelCase_ : Any = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) UpperCAmelCase_ : str = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", 
"""FlaxLongT5ForConditionalGeneration"""), ("""marian""", """FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) UpperCAmelCase_ : Tuple = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) UpperCAmelCase_ : Union[str, Any] = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) UpperCAmelCase_ : Optional[Any] = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) UpperCAmelCase_ : Union[str, Any] = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", 
"""FlaxXLMRobertaForTokenClassification"""), ] ) UpperCAmelCase_ : List[Any] = OrderedDict( [ # Model for Multiple Choice mapping ("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) UpperCAmelCase_ : int = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) UpperCAmelCase_ : Optional[int] = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) UpperCAmelCase_ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase_ : List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase_ : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase_ : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase_ : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase_ : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase_ : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase = FLAX_MODEL_MAPPING UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModel) class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase_ : Union[str, Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase_ : Union[str, Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase_ : Dict = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class lowerCAmelCase__ ( _BaseAutoModelClass 
): '''simple docstring''' __UpperCamelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase_ : List[str] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase_ : Optional[int] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase_ : Any = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ : Dict = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ : Any = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __UpperCamelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase_ : Optional[int] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
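A minimal usage sketch of the auto classes registered above; it assumes they behave like the public FlaxAutoModel API, and the bert-base-uncased checkpoint is purely illustrative.

from transformers import AutoTokenizer, FlaxAutoModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = FlaxAutoModel.from_pretrained("bert-base-uncased")  # dispatched to FlaxBertModel via the mapping above

inputs = tokenizer("Auto classes pick the matching Flax architecture.", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)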
"""simple docstring""" from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 0.0 __UpperCamelCase = 1 __UpperCamelCase = 1 __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = jnp.floataa def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = [] SCREAMING_SNAKE_CASE_ : Optional[int] = [] for i in range(self.num_layers): SCREAMING_SNAKE_CASE_ : int = self.in_channels if i == 0 else self.out_channels SCREAMING_SNAKE_CASE_ : Tuple = FlaxResnetBlockaD( in_channels=lowercase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowercase_) SCREAMING_SNAKE_CASE_ : int = resnets SCREAMING_SNAKE_CASE_ : int = attentions if self.add_downsample: SCREAMING_SNAKE_CASE_ : List[str] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype) def __call__( self : List[Any] , lowercase_ : str , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Tuple=True): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = () for resnet, attn in zip(self.resnets , self.attentions): SCREAMING_SNAKE_CASE_ : List[Any] = resnet(lowercase_ , lowercase_ , deterministic=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = attn(lowercase_ , lowercase_ , deterministic=lowercase_) output_states += (hidden_states,) if self.add_downsample: SCREAMING_SNAKE_CASE_ : Any = self.downsamplers_a(lowercase_) output_states += (hidden_states,) return hidden_states, output_states class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 0.0 __UpperCamelCase = 1 __UpperCamelCase = True __UpperCamelCase = jnp.floataa def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = [] for i in range(self.num_layers): SCREAMING_SNAKE_CASE_ : Any = self.in_channels if i == 0 else self.out_channels SCREAMING_SNAKE_CASE_ : int = FlaxResnetBlockaD( in_channels=lowercase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowercase_) SCREAMING_SNAKE_CASE_ : int = resnets if self.add_downsample: SCREAMING_SNAKE_CASE_ : List[Any] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype) def __call__( self : Optional[int] , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : List[str]=True): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = () for resnet in self.resnets: SCREAMING_SNAKE_CASE_ : List[Any] = resnet(lowercase_ , lowercase_ , deterministic=lowercase_) output_states += (hidden_states,) if self.add_downsample: SCREAMING_SNAKE_CASE_ : Optional[int] = self.downsamplers_a(lowercase_) output_states += (hidden_states,) return hidden_states, output_states class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase 
= 42 __UpperCamelCase = 0.0 __UpperCamelCase = 1 __UpperCamelCase = 1 __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = jnp.floataa def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = [] SCREAMING_SNAKE_CASE_ : Optional[int] = [] for i in range(self.num_layers): SCREAMING_SNAKE_CASE_ : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels SCREAMING_SNAKE_CASE_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels SCREAMING_SNAKE_CASE_ : Any = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowercase_) SCREAMING_SNAKE_CASE_ : Any = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = resnets SCREAMING_SNAKE_CASE_ : Tuple = attentions if self.add_upsample: SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype) def __call__( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Tuple=True): '''simple docstring''' for resnet, attn in zip(self.resnets , self.attentions): # pop res hidden states SCREAMING_SNAKE_CASE_ : List[Any] = res_hidden_states_tuple[-1] SCREAMING_SNAKE_CASE_ : Optional[Any] = res_hidden_states_tuple[:-1] SCREAMING_SNAKE_CASE_ : str = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1) SCREAMING_SNAKE_CASE_ : int = resnet(lowercase_ , lowercase_ , deterministic=lowercase_) SCREAMING_SNAKE_CASE_ : Any = attn(lowercase_ , lowercase_ , deterministic=lowercase_) if self.add_upsample: SCREAMING_SNAKE_CASE_ : int = self.upsamplers_a(lowercase_) return hidden_states class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 0.0 __UpperCamelCase = 1 __UpperCamelCase = True __UpperCamelCase = jnp.floataa def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = [] for i in range(self.num_layers): SCREAMING_SNAKE_CASE_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels SCREAMING_SNAKE_CASE_ : List[Any] = self.prev_output_channel if i == 0 else self.out_channels SCREAMING_SNAKE_CASE_ : List[Any] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = resnets if self.add_upsample: SCREAMING_SNAKE_CASE_ : List[str] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype) def __call__( self : List[Any] , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Tuple=True): '''simple docstring''' for resnet in self.resnets: # pop res hidden states SCREAMING_SNAKE_CASE_ : Optional[Any] = res_hidden_states_tuple[-1] SCREAMING_SNAKE_CASE_ : Dict = res_hidden_states_tuple[:-1] SCREAMING_SNAKE_CASE_ : Dict = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1) SCREAMING_SNAKE_CASE_ : 
Optional[int] = resnet(lowercase_ , lowercase_ , deterministic=lowercase_) if self.add_upsample: SCREAMING_SNAKE_CASE_ : Any = self.upsamplers_a(lowercase_) return hidden_states class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = 0.0 __UpperCamelCase = 1 __UpperCamelCase = 1 __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = jnp.floataa def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] SCREAMING_SNAKE_CASE_ : str = [] for _ in range(self.num_layers): SCREAMING_SNAKE_CASE_ : Any = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowercase_) SCREAMING_SNAKE_CASE_ : Dict = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowercase_) SCREAMING_SNAKE_CASE_ : str = resnets SCREAMING_SNAKE_CASE_ : Tuple = attentions def __call__( self : Any , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Optional[Any]=True): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.resnets[0](lowercase_ , lowercase_) for attn, resnet in zip(self.attentions , self.resnets[1:]): SCREAMING_SNAKE_CASE_ : Optional[Any] = attn(lowercase_ , lowercase_ , deterministic=lowercase_) SCREAMING_SNAKE_CASE_ : Any = resnet(lowercase_ , lowercase_ , deterministic=lowercase_) return hidden_states
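All of the blocks above follow the same flax.linen pattern: setup() builds lists of sub-modules, and __call__ threads the hidden states through them while collecting the intermediate outputs as skip connections. The toy module below is a simplified, self-contained sketch of that pattern only; the Dense layers and sizes are illustrative assumptions, not part of the original blocks.

import flax.linen as nn
import jax
import jax.numpy as jnp


class ToyDownBlock(nn.Module):
    out_features: int = 32
    num_layers: int = 2

    def setup(self):
        # build the sub-module list once, analogous to the resnet/attention lists above
        self.layers = [nn.Dense(self.out_features) for _ in range(self.num_layers)]

    def __call__(self, hidden_states):
        output_states = ()
        for layer in self.layers:
            hidden_states = nn.relu(layer(hidden_states))
            output_states += (hidden_states,)  # keep every intermediate as a skip connection
        return hidden_states, output_states


block = ToyDownBlock()
x = jnp.ones((1, 16))
params = block.init(jax.random.PRNGKey(0), x)
hidden, skips = block.apply(params, x)
print(hidden.shape, len(skips))  # (1, 32) 2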
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL UpperCAmelCase_ : int = logging.get_logger(__name__) def _A (__a ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(__a , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__a , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__a ): return [[videos]] raise ValueError(f'Could not make batched video from {videos}' ) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["pixel_values"] def __init__( self : Dict , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : Dict , ): '''simple docstring''' super().__init__(**lowercase_) SCREAMING_SNAKE_CASE_ : str = size if size is not None else {'''shortest_edge''': 256} SCREAMING_SNAKE_CASE_ : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(lowercase_ , param_name='''crop_size''') SCREAMING_SNAKE_CASE_ : Optional[int] = do_resize SCREAMING_SNAKE_CASE_ : List[Any] = size SCREAMING_SNAKE_CASE_ : Tuple = do_center_crop SCREAMING_SNAKE_CASE_ : Dict = crop_size SCREAMING_SNAKE_CASE_ : List[Any] = resample SCREAMING_SNAKE_CASE_ : List[str] = do_rescale SCREAMING_SNAKE_CASE_ : List[str] = rescale_factor SCREAMING_SNAKE_CASE_ : List[Any] = offset SCREAMING_SNAKE_CASE_ : List[Any] = do_normalize SCREAMING_SNAKE_CASE_ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN SCREAMING_SNAKE_CASE_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_) if "shortest_edge" in size: SCREAMING_SNAKE_CASE_ : List[Any] = get_resize_output_image_size(lowercase_ , size['''shortest_edge'''] , default_to_square=lowercase_) elif "height" in size and "width" in size: SCREAMING_SNAKE_CASE_ : Union[str, Any] = (size['''height'''], size['''width''']) else: raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}') return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = get_size_dict(lowercase_) if "height" not in size or "width" not in size: raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}') return center_crop(lowercase_ , size=(size['''height'''], size['''width''']) , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : bool = True , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[int] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = image.astype(np.floataa) if offset: SCREAMING_SNAKE_CASE_ : Tuple = image - (scale / 2) return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[str] , ): '''simple docstring''' return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ): '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''') if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''') if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''') # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE_ : List[str] = to_numpy_array(lowercase_) if do_resize: SCREAMING_SNAKE_CASE_ : List[Any] = self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) if do_center_crop: SCREAMING_SNAKE_CASE_ : Dict = self.center_crop(lowercase_ , size=lowercase_) if do_rescale: SCREAMING_SNAKE_CASE_ : int = self.rescale(image=lowercase_ , scale=lowercase_ , offset=lowercase_) if do_normalize: SCREAMING_SNAKE_CASE_ : Dict = self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = to_channel_dimension_format(lowercase_ , lowercase_) return image def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Optional[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE_ : int = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE_ : Dict = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE_ : Dict = offset if offset is not None else self.offset SCREAMING_SNAKE_CASE_ : str = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE_ : Dict = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE_ : List[str] = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else self.size SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : Any = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(lowercase_ , param_name='''crop_size''') if not valid_images(lowercase_): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') SCREAMING_SNAKE_CASE_ : Tuple = make_batched(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = [ [ self._preprocess_image( image=lowercase_ , do_resize=lowercase_ , size=lowercase_ , resample=lowercase_ , do_center_crop=lowercase_ , crop_size=lowercase_ , do_rescale=lowercase_ , rescale_factor=lowercase_ , offset=lowercase_ , do_normalize=lowercase_ , image_mean=lowercase_ , image_std=lowercase_ , data_format=lowercase_ , ) for img in video ] for video in videos ] SCREAMING_SNAKE_CASE_ : int = {'''pixel_values''': videos} return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
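A usage sketch of the video processor defined above, assuming it corresponds to a public class such as transformers' VivitImageProcessor; the class name, frame count and resolutions below are illustrative assumptions.

import numpy as np
from transformers import VivitImageProcessor  # assumed public name for the processor above

processor = VivitImageProcessor()  # defaults: shortest edge 256, center crop 224x224
video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]  # 8 fake frames
batch = processor(video, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224)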
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase_ : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCAmelCase_ : Dict = { """vocab_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""", }, """merges_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""", }, """tokenizer_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""", }, } UpperCAmelCase_ : List[str] = { """gpt2""": 1024, """gpt2-medium""": 1024, """gpt2-large""": 1024, """gpt2-xl""": 1024, """distilgpt2""": 1024, } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ["input_ids", "attention_mask"] __UpperCamelCase = GPTaTokenizer def __init__( self : Optional[int] , lowercase_ : int=None , lowercase_ : List[str]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Tuple="<|endoftext|>" , lowercase_ : str="<|endoftext|>" , lowercase_ : Dict="<|endoftext|>" , lowercase_ : Tuple=False , **lowercase_ : Optional[int] , ): '''simple docstring''' super().__init__( lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('''add_bos_token''' , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('''add_prefix_space''' , lowercase_) != add_prefix_space: SCREAMING_SNAKE_CASE_ : int = getattr(lowercase_ , pre_tok_state.pop('''type''')) SCREAMING_SNAKE_CASE_ : str = add_prefix_space SCREAMING_SNAKE_CASE_ : Dict = pre_tok_class(**lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = add_prefix_space def _SCREAMING_SNAKE_CASE ( self : str , *lowercase_ : List[Any] , **lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = 
kwargs.get('''is_split_into_words''' , lowercase_) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *lowercase_ : List[str] , **lowercase_ : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = kwargs.get('''is_split_into_words''' , lowercase_) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , lowercase_ : Optional[str] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self._tokenizer.model.save(lowercase_ , name=lowercase_) return tuple(lowercase_) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : "Conversation"): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_) + [self.eos_token_id]) if len(lowercase_) > self.model_max_length: SCREAMING_SNAKE_CASE_ : Any = input_ids[-self.model_max_length :] return input_ids
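A short usage sketch, assuming the fast tokenizer above is exposed as transformers' GPT2TokenizerFast; note that pretokenized (word-split) input requires add_prefix_space=True, which is exactly what the assertions above enforce.

from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
enc = tokenizer("Hello world")
print(enc["input_ids"])                    # BPE ids for "Hello" and " world"
print(tokenizer.decode(enc["input_ids"]))  # "Hello world"

# word-split input only works when the tokenizer was built with add_prefix_space=True
tokenizer_ws = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
enc_ws = tokenizer_ws(["Hello", "world"], is_split_into_words=True)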
"""simple docstring""" from sklearn.metrics import fa_score import datasets UpperCAmelCase_ : Tuple = """ The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) """ UpperCAmelCase_ : Dict = """ Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives. - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {'f1': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results['f1'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results['f1'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. 
>>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\") >>> print(round(results['f1'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'f1': array([0.8, 0. , 0. ])} """ UpperCAmelCase_ : Any = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''int32''')), '''references''': datasets.Sequence(datasets.Value('''int32''')), } if self.config_name == '''multilabel''' else { '''predictions''': datasets.Value('''int32'''), '''references''': datasets.Value('''int32'''), }) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : int=None , lowercase_ : int=1 , lowercase_ : Any="binary" , lowercase_ : Dict=None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = fa_score( lowercase_ , lowercase_ , labels=lowercase_ , pos_label=lowercase_ , average=lowercase_ , sample_weight=lowercase_) return {"f1": float(lowercase_) if score.size == 1 else score}
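A small cross-check of Example 1 from the docstring, done directly with scikit-learn (which this metric appears to wrap) and with the harmonic-mean formula by hand; the true-positive, false-positive and false-negative counts are read off the example labels.

from sklearn.metrics import f1_score

refs, preds = [0, 1, 0, 1, 0], [0, 0, 1, 1, 0]
print(f1_score(refs, preds))  # 0.5

tp, fp, fn = 1, 1, 1  # one true positive, one false positive, one false negative
precision, recall = tp / (tp + fp), tp / (tp + fn)
print(2 * precision * recall / (precision + recall))  # 0.5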
"""simple docstring""" import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(lowercase_ , '''hidden_sizes''')) self.parent.assertTrue(hasattr(lowercase_ , '''num_attention_heads''')) class lowerCAmelCase__ : '''simple docstring''' def __init__( self : str , lowercase_ : Union[str, Any] , lowercase_ : List[Any]=13 , lowercase_ : Dict=64 , lowercase_ : Dict=3 , lowercase_ : Optional[Any]=3 , lowercase_ : List[Any]=2 , lowercase_ : Any=1 , lowercase_ : List[Any]=16 , lowercase_ : int=[128, 256, 384] , lowercase_ : str=[4, 6, 8] , lowercase_ : Optional[Any]=[2, 3, 4] , lowercase_ : Union[str, Any]=[16, 16, 16] , lowercase_ : Optional[Any]=0 , lowercase_ : Optional[int]=[2, 2, 2] , lowercase_ : Any=[2, 2, 2] , lowercase_ : List[str]=0.02 , lowercase_ : Any=True , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[int]=2 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = parent SCREAMING_SNAKE_CASE_ : Any = batch_size SCREAMING_SNAKE_CASE_ : Optional[Any] = image_size SCREAMING_SNAKE_CASE_ : int = num_channels SCREAMING_SNAKE_CASE_ : List[Any] = kernel_size SCREAMING_SNAKE_CASE_ : Optional[Any] = stride SCREAMING_SNAKE_CASE_ : List[str] = padding SCREAMING_SNAKE_CASE_ : int = hidden_sizes SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads SCREAMING_SNAKE_CASE_ : int = depths SCREAMING_SNAKE_CASE_ : Optional[Any] = key_dim SCREAMING_SNAKE_CASE_ : Optional[Any] = drop_path_rate SCREAMING_SNAKE_CASE_ : Tuple = patch_size SCREAMING_SNAKE_CASE_ : Optional[Any] = attention_ratio SCREAMING_SNAKE_CASE_ : str = mlp_ratio SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range SCREAMING_SNAKE_CASE_ : List[Any] = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] SCREAMING_SNAKE_CASE_ : Any = is_training SCREAMING_SNAKE_CASE_ : Tuple = use_labels SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_labels SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_ : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , self.num_labels) SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : 
Optional[int]): '''simple docstring''' return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : int , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = LevitModel(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(lowercase_) SCREAMING_SNAKE_CASE_ : Any = (self.image_size, self.image_size) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = image_size[0], image_size[1] for _ in range(4): SCREAMING_SNAKE_CASE_ : List[Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1) SCREAMING_SNAKE_CASE_ : Dict = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]) , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.num_labels SCREAMING_SNAKE_CASE_ : Union[str, Any] = LevitForImageClassification(lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE_ : int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) __UpperCamelCase = ( { "feature-extraction": LevitModel, "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = LevitModelTester(self) SCREAMING_SNAKE_CASE_ : List[Any] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' return 
@unittest.skip(reason='''Levit does not use inputs_embeds''') def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' pass @unittest.skip(reason='''Levit does not support input and output embeddings''') def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' pass @unittest.skip(reason='''Levit does not output attentions''') def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Any = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ : Dict = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowercase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' def check_hidden_states_output(lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : str): SCREAMING_SNAKE_CASE_ : str = model_class(lowercase_) model.to(lowercase_) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Tuple = model(**self._prepare_for_class(lowercase_ , lowercase_)) SCREAMING_SNAKE_CASE_ : str = outputs.hidden_states SCREAMING_SNAKE_CASE_ : Optional[int] = len(self.model_tester.depths) + 1 self.assertEqual(len(lowercase_) , lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = (self.model_tester.image_size, self.model_tester.image_size) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_size[0], image_size[1] for _ in range(4): SCREAMING_SNAKE_CASE_ : Optional[Any] = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1) SCREAMING_SNAKE_CASE_ : Optional[int] = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [ height * width, self.model_tester.hidden_sizes[0], ] , ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Optional[int] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE_ : Tuple = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''') def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Tuple=False): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple 
docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : Union[str, Any] = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(lowercase_) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowercase_) model.to(lowercase_) model.train() SCREAMING_SNAKE_CASE_ : Optional[Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = model(**lowercase_).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE_ : Union[str, Any] = False SCREAMING_SNAKE_CASE_ : Optional[int] = True for model_class in self.all_model_classes: if model_class in get_values(lowercase_) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue SCREAMING_SNAKE_CASE_ : List[str] = model_class(lowercase_) model.gradient_checkpointing_enable() model.to(lowercase_) model.train() SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = model(**lowercase_).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : List[Any] = [ {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float}, {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long}, {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(lowercase_), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}'): SCREAMING_SNAKE_CASE_ : Optional[int] = problem_type['''title'''] SCREAMING_SNAKE_CASE_ : Optional[int] = problem_type['''num_labels'''] SCREAMING_SNAKE_CASE_ : str = model_class(lowercase_) model.to(lowercase_) model.train() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) if problem_type["num_labels"] > 1: SCREAMING_SNAKE_CASE_ : str = inputs['''labels'''].unsqueeze(1).repeat(1 , problem_type['''num_labels''']) SCREAMING_SNAKE_CASE_ : Any = inputs['''labels'''].to(problem_type['''dtype''']) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=lowercase_) as warning_list: SCREAMING_SNAKE_CASE_ : int = model(**lowercase_).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message): raise ValueError( F'Something is going wrong in the regression problem: intercepted {w.message}') loss.backward() @slow def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Optional[Any] = LevitModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) def _A () -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) @slow def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to( lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.default_image_processor SCREAMING_SNAKE_CASE_ : str = prepare_img() SCREAMING_SNAKE_CASE_ : List[Any] = image_processor(images=lowercase_ , return_tensors='''pt''').to(lowercase_) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Any = model(**lowercase_) # verify the logits SCREAMING_SNAKE_CASE_ : Tuple = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape , lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([1.04_48, -0.37_45, -1.83_17]).to(lowercase_) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
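A quick numeric check of the convolution output-size formula these tests rely on, using the tester defaults above (image_size=64, kernel_size=3, stride=2, padding=1): the four stem convolutions reduce the resolution step by step.

from math import floor

size, kernel, stride, padding = 64, 3, 2, 1
for _ in range(4):
    size = floor((size + 2 * padding - kernel) / stride) + 1
print(size)  # 4 -> the stem turns a 64x64 image into a 4x4 feature map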
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ : Any = logging.get_logger(__name__) UpperCAmelCase_ : str = { """andreasmadsen/efficient_mlm_m0.40""": ( """https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json""" ), } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "roberta-prelayernorm" def __init__( self : Optional[Any] , lowercase_ : List[str]=50265 , lowercase_ : Union[str, Any]=768 , lowercase_ : List[str]=12 , lowercase_ : List[Any]=12 , lowercase_ : List[str]=3072 , lowercase_ : List[Any]="gelu" , lowercase_ : Any=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : List[str]=2 , lowercase_ : Optional[int]=0.02 , lowercase_ : Tuple=1e-12 , lowercase_ : List[str]=1 , lowercase_ : Optional[int]=0 , lowercase_ : List[str]=2 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : int=True , lowercase_ : int=None , **lowercase_ : Tuple , ): '''simple docstring''' super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : int = vocab_size SCREAMING_SNAKE_CASE_ : Any = hidden_size SCREAMING_SNAKE_CASE_ : int = num_hidden_layers SCREAMING_SNAKE_CASE_ : int = num_attention_heads SCREAMING_SNAKE_CASE_ : Dict = hidden_act SCREAMING_SNAKE_CASE_ : Optional[Any] = intermediate_size SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : int = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings SCREAMING_SNAKE_CASE_ : Any = type_vocab_size SCREAMING_SNAKE_CASE_ : Dict = initializer_range SCREAMING_SNAKE_CASE_ : Any = layer_norm_eps SCREAMING_SNAKE_CASE_ : Any = position_embedding_type SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_cache SCREAMING_SNAKE_CASE_ : List[str] = classifier_dropout class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' if self.task == "multiple-choice": SCREAMING_SNAKE_CASE_ : List[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: SCREAMING_SNAKE_CASE_ : Optional[Any] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ])
"""simple docstring""" from math import factorial def _A (__a = 20 ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... SCREAMING_SNAKE_CASE_ : List[str] = n // 2 return int(factorial(__a ) / (factorial(__a ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(20)) else: try: UpperCAmelCase_ : List[str] = int(sys.argv[1]) print(solution(n)) except ValueError: print("""Invalid entry - please enter a number.""")
"""simple docstring""" def _A (__a ) -> int: """simple docstring""" if not isinstance(__a , __a ): raise ValueError('''Input must be an integer''' ) if input_num <= 0: raise ValueError('''Input must be positive''' ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor UpperCAmelCase_ : Any = logging.get_logger(__name__) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Union[str, Any] , *lowercase_ : List[str] , **lowercase_ : List[str]): '''simple docstring''' warnings.warn( '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use SegformerImageProcessor instead.''' , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_)
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase UpperCAmelCase_ : Tuple = logging.get_logger(__name__) UpperCAmelCase_ : str = { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""", """allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""", """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json""" ), } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "longformer" def __init__( self : Optional[int] , lowercase_ : Union[List[int], int] = 512 , lowercase_ : int = 2 , lowercase_ : int = 1 , lowercase_ : int = 0 , lowercase_ : int = 2 , lowercase_ : int = 30522 , lowercase_ : int = 768 , lowercase_ : int = 12 , lowercase_ : int = 12 , lowercase_ : int = 3072 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 512 , lowercase_ : int = 2 , lowercase_ : float = 0.02 , lowercase_ : float = 1e-12 , lowercase_ : bool = False , **lowercase_ : Tuple , ): '''simple docstring''' super().__init__(pad_token_id=lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : Dict = attention_window SCREAMING_SNAKE_CASE_ : Dict = sep_token_id SCREAMING_SNAKE_CASE_ : List[str] = bos_token_id SCREAMING_SNAKE_CASE_ : List[Any] = eos_token_id SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size SCREAMING_SNAKE_CASE_ : str = hidden_size SCREAMING_SNAKE_CASE_ : Any = num_hidden_layers SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads SCREAMING_SNAKE_CASE_ : List[str] = hidden_act SCREAMING_SNAKE_CASE_ : Optional[Any] = intermediate_size SCREAMING_SNAKE_CASE_ : str = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : Any = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : str = max_position_embeddings SCREAMING_SNAKE_CASE_ : List[Any] = type_vocab_size SCREAMING_SNAKE_CASE_ : str = initializer_range SCREAMING_SNAKE_CASE_ : Dict = layer_norm_eps SCREAMING_SNAKE_CASE_ : List[str] = onnx_export class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : int , lowercase_ : "PretrainedConfig" , lowercase_ : str = "default" , lowercase_ : "List[PatchingSpec]" = None): '''simple docstring''' super().__init__(lowercase_ , lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = True @property def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' if self.task == "multiple-choice": SCREAMING_SNAKE_CASE_ : str = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: SCREAMING_SNAKE_CASE_ : Any = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''global_attention_mask''', dynamic_axis), ]) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple 
docstring''' SCREAMING_SNAKE_CASE_ : List[str] = super().outputs if self.task == "default": SCREAMING_SNAKE_CASE_ : List[str] = {0: '''batch'''} return outputs @property def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' return 1e-4 @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return max(super().default_onnx_opset , 14) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : "PreTrainedTokenizerBase" , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = super().generate_dummy_inputs( preprocessor=lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.zeros_like(inputs['''input_ids''']) # make every second token global SCREAMING_SNAKE_CASE_ : List[str] = 1 return inputs
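# Illustrative sketch (not part of the ONNX config above): generate_dummy_inputs builds a
# global_attention_mask shaped like input_ids and then marks some positions as global. With plain
# torch, "make every second token global" is a strided assignment:
import torch

input_ids = torch.ones((2, 8), dtype=torch.long)  # (batch, sequence)
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1                 # every second token attends globally
print(global_attention_mask[0].tolist())          # [1, 0, 1, 0, 1, 0, 1, 0]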
"""simple docstring""" from __future__ import annotations class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , lowercase_ : int = 0): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = key def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(lowercase_) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(lowercase_) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : int = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned SCREAMING_SNAKE_CASE_ : List[str] = '''''' for ch in content: ans += chr(ord(lowercase_) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned SCREAMING_SNAKE_CASE_ : List[Any] = '''''' for ch in content: ans += chr(ord(lowercase_) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) try: with open(lowercase_) as fin, open('''encrypt.out''' , '''w+''') as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(lowercase_ , lowercase_)) except OSError: return False return True def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) try: with open(lowercase_) as fin, open('''decrypt.out''' , '''w+''') as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(lowercase_ , lowercase_)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = { '''task_specific_params''': { '''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4}, '''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4}, '''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6}, } } SCREAMING_SNAKE_CASE_ : List[str] = { '''task_specific_params.summarization.length_penalty''': 1.0, '''task_specific_params.summarization.max_length''': 128, '''task_specific_params.summarization.min_length''': 12, '''task_specific_params.summarization.num_beams''': 4, '''task_specific_params.summarization_cnn.length_penalty''': 2.0, '''task_specific_params.summarization_cnn.max_length''': 142, '''task_specific_params.summarization_cnn.min_length''': 56, '''task_specific_params.summarization_cnn.num_beams''': 4, '''task_specific_params.summarization_xsum.length_penalty''': 1.0, '''task_specific_params.summarization_xsum.max_length''': 62, '''task_specific_params.summarization_xsum.min_length''': 11, '''task_specific_params.summarization_xsum.num_beams''': 6, } self.assertEqual(flatten_dict(lowercase_) , lowercase_) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3 , 4) self.assertTrue(np.allclose(transpose(lowercase_) , x.transpose())) SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3 , 4 , 5) self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0)) , x.transpose((1, 2, 0)))) @require_torch def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = np.random.randn(3 , 4) SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(lowercase_) self.assertTrue(np.allclose(transpose(lowercase_) , transpose(lowercase_).numpy())) SCREAMING_SNAKE_CASE_ : Optional[int] = np.random.randn(3 , 4 , 5) SCREAMING_SNAKE_CASE_ : str = torch.tensor(lowercase_) self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0)) , transpose(lowercase_ , axes=(1, 2, 0)).numpy())) @require_tf def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = np.random.randn(3 , 4) SCREAMING_SNAKE_CASE_ : int = tf.constant(lowercase_) self.assertTrue(np.allclose(transpose(lowercase_) , transpose(lowercase_).numpy())) SCREAMING_SNAKE_CASE_ : int = np.random.randn(3 , 4 , 5) SCREAMING_SNAKE_CASE_ : List[str] = tf.constant(lowercase_) self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0)) , transpose(lowercase_ , axes=(1, 2, 0)).numpy())) @require_flax def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3 , 4) SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.array(lowercase_) self.assertTrue(np.allclose(transpose(lowercase_) , np.asarray(transpose(lowercase_)))) SCREAMING_SNAKE_CASE_ : Optional[Any] = np.random.randn(3 , 4 , 5) SCREAMING_SNAKE_CASE_ : 
Optional[Any] = jnp.array(lowercase_) self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0)) , np.asarray(transpose(lowercase_ , axes=(1, 2, 0))))) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3 , 4) self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3)) , np.reshape(lowercase_ , (4, 3)))) SCREAMING_SNAKE_CASE_ : Any = np.random.randn(3 , 4 , 5) self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5)) , np.reshape(lowercase_ , (12, 5)))) @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3 , 4) SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(lowercase_) self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3)) , reshape(lowercase_ , (4, 3)).numpy())) SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3 , 4 , 5) SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(lowercase_) self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5)) , reshape(lowercase_ , (12, 5)).numpy())) @require_tf def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = np.random.randn(3 , 4) SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(lowercase_) self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3)) , reshape(lowercase_ , (4, 3)).numpy())) SCREAMING_SNAKE_CASE_ : int = np.random.randn(3 , 4 , 5) SCREAMING_SNAKE_CASE_ : Tuple = tf.constant(lowercase_) self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5)) , reshape(lowercase_ , (12, 5)).numpy())) @require_flax def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = np.random.randn(3 , 4) SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.array(lowercase_) self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3)) , np.asarray(reshape(lowercase_ , (4, 3))))) SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3 , 4 , 5) SCREAMING_SNAKE_CASE_ : List[Any] = jnp.array(lowercase_) self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5)) , np.asarray(reshape(lowercase_ , (12, 5))))) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = np.random.randn(1 , 3 , 4) self.assertTrue(np.allclose(squeeze(lowercase_) , np.squeeze(lowercase_))) SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1 , 4 , 1 , 5) self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2) , np.squeeze(lowercase_ , axis=2))) @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(1 , 3 , 4) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor(lowercase_) self.assertTrue(np.allclose(squeeze(lowercase_) , squeeze(lowercase_).numpy())) SCREAMING_SNAKE_CASE_ : int = np.random.randn(1 , 4 , 1 , 5) SCREAMING_SNAKE_CASE_ : Any = torch.tensor(lowercase_) self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2) , squeeze(lowercase_ , axis=2).numpy())) @require_tf def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(1 , 3 , 4) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(lowercase_) self.assertTrue(np.allclose(squeeze(lowercase_) , squeeze(lowercase_).numpy())) SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1 , 4 , 1 , 5) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(lowercase_) self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2) , squeeze(lowercase_ , axis=2).numpy())) @require_flax def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' 
SCREAMING_SNAKE_CASE_ : Optional[int] = np.random.randn(1 , 3 , 4) SCREAMING_SNAKE_CASE_ : int = jnp.array(lowercase_) self.assertTrue(np.allclose(squeeze(lowercase_) , np.asarray(squeeze(lowercase_)))) SCREAMING_SNAKE_CASE_ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5) SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(lowercase_) self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2) , np.asarray(squeeze(lowercase_ , axis=2)))) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = np.random.randn(3 , 4) self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1) , np.expand_dims(lowercase_ , axis=1))) @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3 , 4) SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(lowercase_) self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1) , expand_dims(lowercase_ , axis=1).numpy())) @require_tf def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3 , 4) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(lowercase_) self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1) , expand_dims(lowercase_ , axis=1).numpy())) @require_flax def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3 , 4) SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(lowercase_) self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1) , np.asarray(expand_dims(lowercase_ , axis=1))))
"""simple docstring""" def _A (__a = 50 ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f'''{solution() = }''')
"""simple docstring""" import os import re import shutil import sys import tempfile import unittest import black UpperCAmelCase_ : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. UpperCAmelCase_ : List[Any] = """ def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states """ class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''')) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.transformer_dir shutil.copy( os.path.join(lowercase_ , '''src/transformers/models/bert/modeling_bert.py''') , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''') , ) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = '''src/transformers''' shutil.rmtree(self.transformer_dir) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Dict , lowercase_ : str , lowercase_ : int , lowercase_ : Tuple=None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = comment + F'\nclass {class_name}(nn.Module):\n' + class_code if overwrite_result is not None: SCREAMING_SNAKE_CASE_ : List[Any] = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result SCREAMING_SNAKE_CASE_ : List[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119) SCREAMING_SNAKE_CASE_ : Optional[int] = black.format_str(lowercase_ , mode=lowercase_) SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.transformer_dir , '''new_code.py''') with open(lowercase_ , '''w''' , newline='''\n''') as f: f.write(lowercase_) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(lowercase_)) == 0) else: check_copies.is_copy_consistent(f.name , overwrite=lowercase_) with open(lowercase_ , '''r''') as f: self.assertTrue(f.read() , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''') self.assertEqual(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , lowercase_ , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from 
transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , lowercase_) , ) # Copy consistency with a really long name SCREAMING_SNAKE_CASE_ : Optional[Any] = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , F'{long_class_name}LMPredictionHead' , re.sub('''Bert''' , lowercase_ , lowercase_) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , lowercase_ , overwrite_result=re.sub('''Bert''' , '''TestModel''' , lowercase_) , ) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = check_copies.LOCALIZED_READMES['''README_zh-hans.md'''] SCREAMING_SNAKE_CASE_ : Any = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),''' ''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**''' ''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders''' ''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang''' ''' Luong, Quoc V. Le, Christopher D. Manning.''' ) SCREAMING_SNAKE_CASE_ : Tuple = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) SCREAMING_SNAKE_CASE_ : Optional[int] = ( '''1. 
**[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文''' ''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自''' ''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather''' ''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,''' ''' Christopher D. Manning 发布。\n''' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = check_copies.convert_to_localized_md( lowercase_ , lowercase_ , localized_readme['''format_model_list''']) self.assertFalse(lowercase_) self.assertEqual(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = check_copies.convert_to_localized_md( lowercase_ , lowercase_ , localized_readme['''format_model_list''']) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(lowercase_) SCREAMING_SNAKE_CASE_ : Dict = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.''' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = ( '''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and''' ''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = check_copies.convert_to_localized_md( lowercase_ , lowercase_ , localized_readme['''format_model_list''']) # Check if the model link is synchronized. self.assertEqual(lowercase_ , lowercase_)
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = (PNDMScheduler,) __UpperCamelCase = (("num_inference_steps", 5_0),) def _SCREAMING_SNAKE_CASE ( self : Any , **lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = { '''num_train_timesteps''': 1000, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**lowercase_) return config def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[str]=0 , **lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('''num_inference_steps''' , lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_sample SCREAMING_SNAKE_CASE_ : List[Any] = 0.1 * sample SCREAMING_SNAKE_CASE_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config(**lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # copy over dummy past residuals SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class.from_pretrained(lowercase_) new_scheduler.set_timesteps(lowercase_) # copy over dummy past residuals SCREAMING_SNAKE_CASE_ : Optional[Any] = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str]=0 , **lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop('''num_inference_steps''' , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = 0.1 * sample SCREAMING_SNAKE_CASE_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # copy over dummy past residuals (must be after setting timesteps) SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_) SCREAMING_SNAKE_CASE_ : str = scheduler_class.from_pretrained(lowercase_) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_) # copy over dummy past residual (must be after setting 
timesteps) SCREAMING_SNAKE_CASE_ : Any = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Tuple = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : str , **lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_scheduler_config(**lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class(**lowercase_) SCREAMING_SNAKE_CASE_ : Dict = 10 SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_model() SCREAMING_SNAKE_CASE_ : str = self.dummy_sample_deter scheduler.set_timesteps(lowercase_) for i, t in enumerate(scheduler.prk_timesteps): SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : str = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample for i, t in enumerate(scheduler.plms_timesteps): SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_).prev_sample return sample def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''num_inference_steps''' , lowercase_) for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler_class(**lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_sample SCREAMING_SNAKE_CASE_ : Any = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase_ , '''set_timesteps'''): scheduler.set_timesteps(lowercase_) elif num_inference_steps is not None and not hasattr(lowercase_ , '''set_timesteps'''): SCREAMING_SNAKE_CASE_ : Optional[Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) SCREAMING_SNAKE_CASE_ : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] SCREAMING_SNAKE_CASE_ : Optional[int] = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Dict = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : List[Any] = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Any = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' for steps_offset in [0, 1]: 
self.check_over_configs(steps_offset=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : List[str] = self.get_scheduler_config(steps_offset=1) SCREAMING_SNAKE_CASE_ : Tuple = scheduler_class(**lowercase_) scheduler.set_timesteps(10) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , ) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02]): self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' for t in [1, 5, 10]: self.check_over_forward(time_step=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]): self.check_over_forward(num_inference_steps=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = 27 for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_sample SCREAMING_SNAKE_CASE_ : str = 0.1 * sample SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2]): SCREAMING_SNAKE_CASE_ : int = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' with self.assertRaises(lowercase_): SCREAMING_SNAKE_CASE_ : int = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : List[str] = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Dict = scheduler_class(**lowercase_) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.full_loop() SCREAMING_SNAKE_CASE_ : List[Any] = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 1_98.13_18) < 1e-2 assert abs(result_mean.item() - 0.25_80) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.full_loop(prediction_type='''v_prediction''') SCREAMING_SNAKE_CASE_ : str = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 67.39_86) < 1e-2 assert abs(result_mean.item() - 0.08_78) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01) SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 2_30.03_99) < 1e-2 assert abs(result_mean.item() - 0.29_95) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' 
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01) SCREAMING_SNAKE_CASE_ : int = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : List[str] = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 1_86.94_82) < 1e-2 assert abs(result_mean.item() - 0.24_34) < 1e-3
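# Denoising-loop sketch (illustrative only; a real pipeline would use a trained UNet in place of the
# random "model output" below). It mirrors the PRK/PLMS stepping that the full_loop helper above
# exercises, using the same scheduler configuration as the tests.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)             # stand-in for a noisy latent
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for unet(sample, t).sample
    sample = scheduler.step(model_output, t, sample).prev_sample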
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING UpperCAmelCase_ : Any = logging.get_logger(__name__) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "upernet" def __init__( self : Any , lowercase_ : int=None , lowercase_ : Any=512 , lowercase_ : List[Any]=0.02 , lowercase_ : int=[1, 2, 3, 6] , lowercase_ : Union[str, Any]=True , lowercase_ : Tuple=0.4 , lowercase_ : int=384 , lowercase_ : Optional[Any]=256 , lowercase_ : str=1 , lowercase_ : List[str]=False , lowercase_ : str=255 , **lowercase_ : str , ): '''simple docstring''' super().__init__(**lowercase_) if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''') SCREAMING_SNAKE_CASE_ : Tuple = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4''']) elif isinstance(lowercase_ , lowercase_): SCREAMING_SNAKE_CASE_ : str = backbone_config.get('''model_type''') SCREAMING_SNAKE_CASE_ : Optional[int] = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE_ : str = config_class.from_dict(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = backbone_config SCREAMING_SNAKE_CASE_ : List[Any] = hidden_size SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range SCREAMING_SNAKE_CASE_ : Any = pool_scales SCREAMING_SNAKE_CASE_ : int = use_auxiliary_head SCREAMING_SNAKE_CASE_ : str = auxiliary_loss_weight SCREAMING_SNAKE_CASE_ : Dict = auxiliary_in_channels SCREAMING_SNAKE_CASE_ : Union[str, Any] = auxiliary_channels SCREAMING_SNAKE_CASE_ : Optional[Any] = auxiliary_num_convs SCREAMING_SNAKE_CASE_ : Any = auxiliary_concat_input SCREAMING_SNAKE_CASE_ : Tuple = loss_ignore_index def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = copy.deepcopy(self.__dict__) SCREAMING_SNAKE_CASE_ : Any = self.backbone_config.to_dict() SCREAMING_SNAKE_CASE_ : str = self.__class__.model_type return output
"""simple docstring""" import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @parameterized.expand([(None,), ('''foo.json''',)]) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ , config_name=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , lowercase_) self.assertEqual(loaded_config.temperature , 0.7) self.assertEqual(loaded_config.length_penalty , 1.0) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50) self.assertEqual(loaded_config.max_length , 20) self.assertEqual(loaded_config.max_time , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoConfig.from_pretrained('''gpt2''') SCREAMING_SNAKE_CASE_ : int = GenerationConfig.from_model_config(lowercase_) SCREAMING_SNAKE_CASE_ : int = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(lowercase_ , lowercase_) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = GenerationConfig() SCREAMING_SNAKE_CASE_ : Any = { '''max_new_tokens''': 1024, '''foo''': '''bar''', } SCREAMING_SNAKE_CASE_ : str = copy.deepcopy(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = generation_config.update(**lowercase_) # update_kwargs was not modified (no side effects) self.assertEqual(lowercase_ , lowercase_) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024) # `.update()` returns a dictionary of unused kwargs self.assertEqual(lowercase_ , {'''foo''': '''bar'''}) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = GenerationConfig() SCREAMING_SNAKE_CASE_ : List[str] = '''bar''' with tempfile.TemporaryDirectory('''test-generation-config''') as tmp_dir: generation_config.save_pretrained(lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = GenerationConfig.from_pretrained(lowercase_) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , '''bar''') SCREAMING_SNAKE_CASE_ : Optional[Any] = GenerationConfig.from_model_config(lowercase_) assert not hasattr(lowercase_ , '''foo''') # no new kwargs should be initialized if from config def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig() self.assertEqual(default_config.temperature , 1.0) 
self.assertEqual(default_config.do_sample , lowercase_) self.assertEqual(default_config.num_beams , 1) SCREAMING_SNAKE_CASE_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7) self.assertEqual(config.do_sample , lowercase_) self.assertEqual(config.num_beams , 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0) self.assertEqual(loaded_config.temperature , 1.0) self.assertEqual(loaded_config.do_sample , lowercase_) self.assertEqual(loaded_config.num_beams , 1) # default value @is_staging_test class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = TOKEN HfFolder.save_token(lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str]): '''simple docstring''' try: delete_repo(token=cls._token , repo_id='''test-generation-config''') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''') except HTTPError: pass def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''test-generation-config''' , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : int = GenerationConfig.from_pretrained(F'{USER}/test-generation-config') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) # Reset repo delete_repo(token=self._token , repo_id='''test-generation-config''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id='''test-generation-config''' , push_to_hub=lowercase_ , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : Optional[int] = GenerationConfig.from_pretrained(F'{USER}/test-generation-config') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : Any = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=lowercase_ , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : Any = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
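# Usage sketch (illustrative): the save/reload round trip the tests above rely on: explicitly set
# values survive serialization while unspecified ones keep their defaults (e.g. top_k stays 50).
import tempfile
from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    reloaded = GenerationConfig.from_pretrained(tmp_dir)
assert reloaded.temperature == 0.7 and reloaded.top_k == 50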
"""simple docstring""" def _A (__a , __a ) -> int: """simple docstring""" while a != 0: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = b % a, a return b def _A (__a , __a ) -> int: """simple docstring""" if gcd(__a , __a ) != 1: SCREAMING_SNAKE_CASE_ : Any = f'mod inverse of {a!r} and {m!r} does not exist' raise ValueError(__a ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = 1, 0, a SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = 0, 1, m while va != 0: SCREAMING_SNAKE_CASE_ : Any = ua // va SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets UpperCAmelCase_ : Optional[Any] = datasets.logging.get_logger(__name__) UpperCAmelCase_ : List[str] = """\ @InProceedings{moosavi2019minimum, author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube}, title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection}, year = {2019}, booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, publisher = {Association for Computational Linguistics}, address = {Florence, Italy}, } @inproceedings{10.3115/1072399.1072405, author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette}, title = {A Model-Theoretic Coreference Scoring Scheme}, year = {1995}, isbn = {1558604022}, publisher = {Association for Computational Linguistics}, address = {USA}, url = {https://doi.org/10.3115/1072399.1072405}, doi = {10.3115/1072399.1072405}, booktitle = {Proceedings of the 6th Conference on Message Understanding}, pages = {45–52}, numpages = {8}, location = {Columbia, Maryland}, series = {MUC6 ’95} } @INPROCEEDINGS{Bagga98algorithmsfor, author = {Amit Bagga and Breck Baldwin}, title = {Algorithms for Scoring Coreference Chains}, booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference}, year = {1998}, pages = {563--566} } @INPROCEEDINGS{Luo05oncoreference, author = {Xiaoqiang Luo}, title = {On coreference resolution performance metrics}, booktitle = {In Proc. of HLT/EMNLP}, year = {2005}, pages = {25--32}, publisher = {URL} } @inproceedings{moosavi-strube-2016-coreference, title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\", author = \"Moosavi, Nafise Sadat and Strube, Michael\", booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\", month = aug, year = \"2016\", address = \"Berlin, Germany\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/P16-1060\", doi = \"10.18653/v1/P16-1060\", pages = \"632--642\", } """ UpperCAmelCase_ : Tuple = """\ CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which implements of the common evaluation metrics including MUC [Vilain et al, 1995], B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005], LEA [Moosavi and Strube, 2016] and the averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) [Denis and Baldridge, 2009a; Pradhan et al., 2011]. This wrapper of CoVal currently only work with CoNLL line format: The CoNLL format has one word per line with all the annotation for this word in column separated by spaces: Column Type Description 1 Document ID This is a variation on the document filename 2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc. 3 Word number 4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release. 
5 Part-of-Speech 6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column. 7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\" 8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7. 9 Word sense This is the word sense of the word in Column 3. 10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data. 11 Named Entities These columns identifies the spans representing various named entities. 12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7. N Coreference Coreference chain information encoded in a parenthesis structure. More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md CoVal code was written by @ns-moosavi. Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py The test suite is taken from https://github.com/conll/reference-coreference-scorers/ Mention evaluation and the test suite are added by @andreasvc. Parsing CoNLL files is developed by Leo Born. """ UpperCAmelCase_ : Union[str, Any] = """ Calculates coreference evaluation metrics. Args: predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format. Each prediction is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format. Each reference is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. keep_singletons: After extracting all mentions of key or system files, mentions whose corresponding coreference chain is of size one, are considered as singletons. The default evaluation mode will include singletons in evaluations if they are included in the key or the system files. By setting 'keep_singletons=False', all singletons in the key and system files will be excluded from the evaluation. NP_only: Most of the recent coreference resolvers only resolve NP mentions and leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs. min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans. Minimum spans are determined using the MINA algorithm. 
Returns: 'mentions': mentions 'muc': MUC metric [Vilain et al, 1995] 'bcub': B-cubed [Bagga and Baldwin, 1998] 'ceafe': CEAFe [Luo et al., 2005] 'lea': LEA [Moosavi and Strube, 2016] 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) Examples: >>> coval = datasets.load_metric('coval') >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -', ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)', ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)', ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -', ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -', ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -'] >>> references = [words] >>> predictions = [words] >>> results = coval.compute(predictions=predictions, references=references) >>> print(results) # doctest:+ELLIPSIS {'mentions/recall': 1.0,[...] 'conll_score': 100.0} """ def _A (__a , __a , __a=False , __a=False , __a=True , __a=False , __a="dummy_doc" ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = {doc: key_lines} SCREAMING_SNAKE_CASE_ : List[str] = {doc: sys_lines} SCREAMING_SNAKE_CASE_ : Dict = {} SCREAMING_SNAKE_CASE_ : Dict = 0 SCREAMING_SNAKE_CASE_ : List[str] = 0 SCREAMING_SNAKE_CASE_ : Tuple = 0 SCREAMING_SNAKE_CASE_ : int = 0 SCREAMING_SNAKE_CASE_ : List[str] = 0 SCREAMING_SNAKE_CASE_ : Any = 0 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = reader.get_doc_mentions(__a , key_doc_lines[doc] , __a ) key_singletons_num += singletons_num if NP_only or min_span: SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.set_annotated_parse_trees(__a , key_doc_lines[doc] , __a , __a ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = reader.get_doc_mentions(__a , sys_doc_lines[doc] , __a ) sys_singletons_num += singletons_num if NP_only or min_span: SCREAMING_SNAKE_CASE_ : Union[str, Any] = reader.set_annotated_parse_trees(__a , key_doc_lines[doc] , __a , __a ) if remove_nested: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = reader.remove_nested_coref_mentions(__a , __a ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = reader.remove_nested_coref_mentions(__a , __a ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.get_mention_assignments(__a , __a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.get_mention_assignments(__a , __a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( '''Number of removed nested coreferring mentions in the key ''' f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' ) logger.info( '''Number of resulting singleton clusters in the key ''' f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' ) if not keep_singletons: logger.info( f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ' '''files, respectively''' ) return doc_coref_infos def _A (__a , __a , __a , __a , __a , __a , __a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = get_coref_infos(__a , __a , __a , __a , __a , __a ) SCREAMING_SNAKE_CASE_ : str = {} SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0 
SCREAMING_SNAKE_CASE_ : str = 0 for name, metric in metrics: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = evaluator.evaluate_documents(__a , __a , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': fa} ) logger.info( name.ljust(10 ) , f'Recall: {recall * 1_00:.2f}' , f' Precision: {precision * 1_00:.2f}' , f' F1: {fa * 1_00:.2f}' , ) if conll_subparts_num == 3: SCREAMING_SNAKE_CASE_ : Tuple = (conll / 3) * 1_00 logger.info(f'CoNLL score: {conll:.2f}' ) output_scores.update({'''conll_score''': conll} ) return output_scores def _A (__a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = False for line in key_lines: if not line.startswith('''#''' ): if len(line.split() ) > 6: SCREAMING_SNAKE_CASE_ : Any = line.split()[5] if not parse_col == "-": SCREAMING_SNAKE_CASE_ : Any = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''')), '''references''': datasets.Sequence(datasets.Value('''string''')), }) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[ '''https://github.com/ns-moosavi/coval''', '''https://www.aclweb.org/anthology/P16-1060''', '''http://www.conll.cemantix.org/2012/data.html''', ] , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Dict=True , lowercase_ : Optional[Any]=False , lowercase_ : Optional[Any]=False , lowercase_ : Dict=False): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = [ ('''mentions''', evaluator.mentions), ('''muc''', evaluator.muc), ('''bcub''', evaluator.b_cubed), ('''ceafe''', evaluator.ceafe), ('''lea''', evaluator.lea), ] if min_span: SCREAMING_SNAKE_CASE_ : Union[str, Any] = util.check_gold_parse_annotation(lowercase_) if not has_gold_parse: raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''') # util.parse_key_file(key_file) # key_file = key_file + ".parsed" SCREAMING_SNAKE_CASE_ : Optional[Any] = evaluate( key_lines=lowercase_ , sys_lines=lowercase_ , metrics=lowercase_ , NP_only=lowercase_ , remove_nested=lowercase_ , keep_singletons=lowercase_ , min_span=lowercase_ , ) return score
91
1
"""simple docstring""" import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available UpperCAmelCase_ : Tuple = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 @dataclass class lowerCAmelCase__ : '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = None __UpperCamelCase = None class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "train" __UpperCamelCase = "dev" __UpperCamelCase = "test" class lowerCAmelCase__ : '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Union[Split, str]): '''simple docstring''' raise NotImplementedError @staticmethod def _SCREAMING_SNAKE_CASE ( lowercase_ : str): '''simple docstring''' raise NotImplementedError @staticmethod def _SCREAMING_SNAKE_CASE ( lowercase_ : List[InputExample] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : PreTrainedTokenizer , lowercase_ : str=False , lowercase_ : Dict="[CLS]" , lowercase_ : List[Any]=1 , lowercase_ : Optional[int]="[SEP]" , lowercase_ : Optional[Any]=False , lowercase_ : Optional[Any]=False , lowercase_ : str=0 , lowercase_ : Dict=0 , lowercase_ : Union[str, Any]=-100 , lowercase_ : List[str]=0 , lowercase_ : Any=True , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = {label: i for i, label in enumerate(lowercase_)} SCREAMING_SNAKE_CASE_ : int = [] for ex_index, example in enumerate(lowercase_): if ex_index % 10000 == 0: logger.info('''Writing example %d of %d''' , lowercase_ , len(lowercase_)) SCREAMING_SNAKE_CASE_ : Tuple = [] SCREAMING_SNAKE_CASE_ : List[str] = [] for word, label in zip(example.words , example.labels): SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.tokenize(lowercase_) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(lowercase_) > 0: tokens.extend(lowercase_) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(lowercase_) - 1)) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.num_special_tokens_to_add() if len(lowercase_) > max_seq_length - special_tokens_count: SCREAMING_SNAKE_CASE_ : Dict = tokens[: (max_seq_length - special_tokens_count)] SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". 
Note that this only makes sense because # the entire model is fine-tuned. tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] SCREAMING_SNAKE_CASE_ : Optional[Any] = [sequence_a_segment_id] * len(lowercase_) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: SCREAMING_SNAKE_CASE_ : int = [cls_token] + tokens SCREAMING_SNAKE_CASE_ : Any = [pad_token_label_id] + label_ids SCREAMING_SNAKE_CASE_ : Tuple = [cls_token_segment_id] + segment_ids SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.convert_tokens_to_ids(lowercase_) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. SCREAMING_SNAKE_CASE_ : Dict = [1 if mask_padding_with_zero else 0] * len(lowercase_) # Zero-pad up to the sequence length. SCREAMING_SNAKE_CASE_ : Optional[Any] = max_seq_length - len(lowercase_) if pad_on_left: SCREAMING_SNAKE_CASE_ : int = ([pad_token] * padding_length) + input_ids SCREAMING_SNAKE_CASE_ : int = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask SCREAMING_SNAKE_CASE_ : Optional[int] = ([pad_token_segment_id] * padding_length) + segment_ids SCREAMING_SNAKE_CASE_ : str = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(lowercase_) == max_seq_length assert len(lowercase_) == max_seq_length assert len(lowercase_) == max_seq_length assert len(lowercase_) == max_seq_length if ex_index < 5: logger.info('''*** Example ***''') logger.info('''guid: %s''' , example.guid) logger.info('''tokens: %s''' , ''' '''.join([str(lowercase_) for x in tokens])) logger.info('''input_ids: %s''' , ''' '''.join([str(lowercase_) for x in input_ids])) logger.info('''input_mask: %s''' , ''' '''.join([str(lowercase_) for x in input_mask])) logger.info('''segment_ids: %s''' , ''' '''.join([str(lowercase_) for x in segment_ids])) logger.info('''label_ids: %s''' , ''' '''.join([str(lowercase_) for x in label_ids])) if "token_type_ids" not in tokenizer.model_input_names: SCREAMING_SNAKE_CASE_ : Dict = None features.append( InputFeatures( input_ids=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , label_ids=lowercase_)) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = nn.CrossEntropyLoss().ignore_index def __init__( self : List[str] , lowercase_ : TokenClassificationTask , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : Tuple=False , lowercase_ : Split = Split.train , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = os.path.join( lowercase_ , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(lowercase_)) , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
SCREAMING_SNAKE_CASE_ : Tuple = cached_features_file + '''.lock''' with FileLock(lowercase_): if os.path.exists(lowercase_) and not overwrite_cache: logger.info(F'Loading features from cached file {cached_features_file}') SCREAMING_SNAKE_CASE_ : Tuple = torch.load(lowercase_) else: logger.info(F'Creating features from dataset file at {data_dir}') SCREAMING_SNAKE_CASE_ : List[Any] = token_classification_task.read_examples_from_file(lowercase_ , lowercase_) # TODO clean up all this to leverage built-in features of tokenizers SCREAMING_SNAKE_CASE_ : Dict = token_classification_task.convert_examples_to_features( lowercase_ , lowercase_ , lowercase_ , lowercase_ , cls_token_at_end=bool(model_type in ['''xlnet''']) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=lowercase_ , pad_on_left=bool(tokenizer.padding_side == '''left''') , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(F'Saving features into cached file {cached_features_file}') torch.save(self.features , lowercase_) def __len__( self : List[str]): '''simple docstring''' return len(self.features) def __getitem__( self : List[Any] , lowercase_ : str): '''simple docstring''' return self.features[i] if is_tf_available(): import tensorflow as tf class lowerCAmelCase__ : '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = -1_0_0 def __init__( self : List[str] , lowercase_ : TokenClassificationTask , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : str=False , lowercase_ : Split = Split.train , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = token_classification_task.read_examples_from_file(lowercase_ , lowercase_) # TODO clean up all this to leverage built-in features of tokenizers SCREAMING_SNAKE_CASE_ : Union[str, Any] = token_classification_task.convert_examples_to_features( lowercase_ , lowercase_ , lowercase_ , lowercase_ , cls_token_at_end=bool(model_type in ['''xlnet''']) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=lowercase_ , pad_on_left=bool(tokenizer.padding_side == '''left''') , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: SCREAMING_SNAKE_CASE_ : Tuple = tf.data.Dataset.from_generator( lowercase_ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , ( {'''input_ids''': tf.TensorShape([None]), '''attention_mask''': tf.TensorShape([None])}, tf.TensorShape([None]), ) , ) else: SCREAMING_SNAKE_CASE_ : str = tf.data.Dataset.from_generator( lowercase_ , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , ( { '''input_ids''': tf.TensorShape([None]), '''attention_mask''': tf.TensorShape([None]), '''token_type_ids''': tf.TensorShape([None]), }, tf.TensorShape([None]), ) , ) def _SCREAMING_SNAKE_CASE ( self : List[str]): 
'''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features))) return self.dataset def __len__( self : Optional[Any]): '''simple docstring''' return len(self.features) def __getitem__( self : Tuple , lowercase_ : str): '''simple docstring''' return self.features[i]
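# --- Usage sketch (added for illustration, not part of the original module) ---
# The classes above are anonymized in this dump, so the names below assume the upstream
# token-classification utilities (TokenClassificationTask, InputExample, Split and the
# PyTorch TokenClassificationDataset) and a hypothetical ./conll_data directory holding one
# "token label" pair per line with blank lines between sentences.
import os

from transformers import AutoTokenizer


class NER(TokenClassificationTask):
    @staticmethod
    def read_examples_from_file(data_dir, mode):
        examples, words, labels = [], [], []
        with open(os.path.join(data_dir, f"{mode.value}.txt"), encoding="utf-8") as f:
            for line in f:
                if not line.strip():
                    if words:
                        examples.append(InputExample(guid=f"{mode.value}-{len(examples)}", words=words, labels=labels))
                        words, labels = [], []
                else:
                    token, label = line.split()
                    words.append(token)
                    labels.append(label)
        if words:
            examples.append(InputExample(guid=f"{mode.value}-{len(examples)}", words=words, labels=labels))
        return examples

    @staticmethod
    def get_labels(path):
        return ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
train_dataset = TokenClassificationDataset(
    token_classification_task=NER(),
    data_dir="./conll_data",
    tokenizer=tokenizer,
    labels=NER.get_labels(None),
    model_type="bert",
    max_seq_length=128,
    mode=Split.train,
)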
91
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : Tuple = """▁""" UpperCAmelCase_ : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""} UpperCAmelCase_ : str = { """vocab_file""": { """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""", } } UpperCAmelCase_ : str = { """facebook/xglm-564M""": 2048, } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ["input_ids", "attention_mask"] def __init__( self : List[Any] , lowercase_ : str , lowercase_ : Tuple="<s>" , lowercase_ : Any="</s>" , lowercase_ : Optional[int]="</s>" , lowercase_ : List[Any]="<s>" , lowercase_ : Union[str, Any]="<unk>" , lowercase_ : Union[str, Any]="<pad>" , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : Tuple , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer SCREAMING_SNAKE_CASE_ : List[str] = 7 SCREAMING_SNAKE_CASE_ : Tuple = [F'<madeupword{i}>' for i in range(self.num_madeup_words)] SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.get('''additional_special_tokens''' , []) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(lowercase_)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1 # Mimic fairseq token-to-id alignment for the first 4 token SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} SCREAMING_SNAKE_CASE_ : List[Any] = len(self.sp_model) SCREAMING_SNAKE_CASE_ : Optional[Any] = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)} self.fairseq_tokens_to_ids.update(lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.__dict__.copy() SCREAMING_SNAKE_CASE_ : str = None SCREAMING_SNAKE_CASE_ : Optional[int] = self.sp_model.serialized_model_proto() return state def __setstate__( self : Tuple , lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs'''): SCREAMING_SNAKE_CASE_ : Union[str, Any] = {} SCREAMING_SNAKE_CASE_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None): '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_) if token_ids_a is None: return [1] + ([0] * len(lowercase_)) return [1] + ([0] * len(lowercase_)) + [1, 1] + ([0] * len(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a) * [0] @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : str): '''simple docstring''' return self.sp_model.encode(lowercase_ , out_type=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Union[str, Any]): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE_ : Optional[Any] = self.sp_model.PieceToId(lowercase_) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[Any]): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def 
_SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = ''''''.join(lowercase_).replace(lowercase_ , ''' ''').strip() return out_string def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None): '''simple docstring''' if not os.path.isdir(lowercase_): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join( lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , lowercase_) elif not os.path.isfile(self.vocab_file): with open(lowercase_ , '''wb''') as fi: SCREAMING_SNAKE_CASE_ : int = self.sp_model.serialized_model_proto() fi.write(lowercase_) return (out_vocab_file,)
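# --- Usage sketch (added for illustration, not part of the original module) ---
# The tokenizer class above is anonymized in this dump but mirrors transformers'
# XGLMTokenizer, so the example assumes that public name and the facebook/xglm-564M
# checkpoint referenced in the pretrained maps above.
from transformers import XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

encoding = tokenizer("Hello world")
# build_inputs_with_special_tokens prepends the </s> separator, and the fairseq offset
# shifts sentencepiece ids so that <s>/<pad>/</s>/<unk> keep their fairseq positions.
print(encoding["input_ids"])
print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))
print(tokenizer.decode(encoding["input_ids"]))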
91
1
"""simple docstring""" import pytest UpperCAmelCase_ : Union[str, Any] = """__dummy_dataset1__""" UpperCAmelCase_ : Optional[int] = """ import json import os import datasets REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\" URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"} class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self): features = datasets.Features( { \"tokens\": datasets.Sequence(datasets.Value(\"string\")), \"ner_tags\": datasets.Sequence( datasets.features.ClassLabel( names=[ \"O\", \"B-PER\", \"I-PER\", \"B-ORG\", \"I-ORG\", \"B-LOC\", \"I-LOC\", ] ) ), \"langs\": datasets.Sequence(datasets.Value(\"string\")), \"spans\": datasets.Sequence(datasets.Value(\"string\")), } ) return datasets.DatasetInfo(features=features) def _split_generators(self, dl_manager): dl_path = dl_manager.download(URLS) return [ datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}), datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}), ] def _generate_examples(self, filepath): with open(filepath, \"r\", encoding=\"utf-8\") as f: for i, line in enumerate(f): yield i, json.loads(line) """ @pytest.fixture def _A () -> List[Any]: """simple docstring""" return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def _A () -> str: """simple docstring""" return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def _A (__a , __a , __a ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = dataset_loading_script_name SCREAMING_SNAKE_CASE_ : int = tmp_path / '''datasets''' / script_name script_dir.mkdir(parents=__a ) SCREAMING_SNAKE_CASE_ : Any = script_dir / f'{script_name}.py' with open(__a , '''w''' ) as f: f.write(__a ) return str(__a )
91
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Dict = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = [ 
'''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] SCREAMING_SNAKE_CASE_ : List[Any] = '''fp16''' self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] SCREAMING_SNAKE_CASE_ : List[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : str = '''fp16''' self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_))
91
1
"""simple docstring""" from string import ascii_uppercase UpperCAmelCase_ : Tuple = {char: i for i, char in enumerate(ascii_uppercase)} UpperCAmelCase_ : Dict = dict(enumerate(ascii_uppercase)) def _A (__a , __a ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = len(__a ) SCREAMING_SNAKE_CASE_ : List[Any] = 0 while True: if x == i: SCREAMING_SNAKE_CASE_ : List[Any] = 0 if len(__a ) == len(__a ): break key += key[i] i += 1 return key def _A (__a , __a ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = '''''' SCREAMING_SNAKE_CASE_ : str = 0 for letter in message: if letter == " ": cipher_text += " " else: SCREAMING_SNAKE_CASE_ : List[Any] = (dicta[letter] - dicta[key_new[i]]) % 26 i += 1 cipher_text += dicta[x] return cipher_text def _A (__a , __a ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = '''''' SCREAMING_SNAKE_CASE_ : Dict = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: SCREAMING_SNAKE_CASE_ : List[str] = (dicta[letter] + dicta[key_new[i]] + 26) % 26 i += 1 or_txt += dicta[x] return or_txt def _A () -> None: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = '''THE GERMAN ATTACK''' SCREAMING_SNAKE_CASE_ : int = '''SECRET''' SCREAMING_SNAKE_CASE_ : Dict = generate_key(__a , __a ) SCREAMING_SNAKE_CASE_ : Any = cipher_text(__a , __a ) print(f'Encrypted Text = {s}' ) print(f'Original Text = {original_text(__a , __a )}' ) if __name__ == "__main__": import doctest doctest.testmod() main()
91
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable UpperCAmelCase_ : str = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Dict = ["""GPTNeoXTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : List[str] = [ """GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoXForCausalLM""", """GPTNeoXForQuestionAnswering""", """GPTNeoXForSequenceClassification""", """GPTNeoXForTokenClassification""", """GPTNeoXLayer""", """GPTNeoXModel""", """GPTNeoXPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
91
1
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def _A (__a ) -> Dict: """simple docstring""" return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device ) def _A (__a ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = create_tensor(__a ) SCREAMING_SNAKE_CASE_ : List[Any] = gather(__a ) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) ) def _A (__a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = [state.process_index] SCREAMING_SNAKE_CASE_ : int = gather_object(__a ) assert len(__a ) == state.num_processes, f'{gathered_obj}, {len(__a )} != {state.num_processes}' assert gathered_obj == list(range(state.num_processes ) ), f'{gathered_obj} != {list(range(state.num_processes ) )}' def _A (__a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = create_tensor(__a ) SCREAMING_SNAKE_CASE_ : List[str] = broadcast(__a ) assert broadcasted_tensor.shape == torch.Size([state.num_processes] ) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) ) def _A (__a ) -> List[Any]: """simple docstring""" if state.is_main_process: SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.arange(state.num_processes + 1 ).to(state.device ) else: SCREAMING_SNAKE_CASE_ : Tuple = torch.arange(state.num_processes ).to(state.device ) SCREAMING_SNAKE_CASE_ : int = pad_across_processes(__a ) assert padded_tensor.shape == torch.Size([state.num_processes + 1] ) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0] def _A (__a ) -> int: """simple docstring""" if state.num_processes != 2: return SCREAMING_SNAKE_CASE_ : Tuple = create_tensor(__a ) SCREAMING_SNAKE_CASE_ : Dict = reduce(__a , '''sum''' ) SCREAMING_SNAKE_CASE_ : Any = torch.tensor([4.0, 6] ).to(state.device ) assert torch.allclose(__a , __a ), f'{reduced_tensor} != {truth_tensor}' def _A (__a ) -> List[str]: """simple docstring""" if state.num_processes != 2: return SCREAMING_SNAKE_CASE_ : Tuple = create_tensor(__a ) SCREAMING_SNAKE_CASE_ : int = reduce(__a , '''mean''' ) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([2.0, 3] ).to(state.device ) assert torch.allclose(__a , __a ), f'{reduced_tensor} != {truth_tensor}' def _A (__a ) -> Optional[Any]: """simple docstring""" main() def _A () -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = PartialState() state.print(f'State: {state}' ) state.print('''testing gather''' ) test_gather(__a ) state.print('''testing gather_object''' ) test_gather_object(__a ) state.print('''testing broadcast''' ) test_broadcast(__a ) state.print('''testing pad_across_processes''' ) test_pad_across_processes(__a ) state.print('''testing reduce_sum''' ) test_reduce_sum(__a ) 
state.print('''testing reduce_mean''' ) test_reduce_mean(__a ) if __name__ == "__main__": main()
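# --- Note (added): this script exercises collective operations and is meant to be run under
# the Accelerate launcher so that PartialState sees several processes, e.g. (command assumed):
#
#     accelerate launch --num_processes 2 path/to/this_script.py
#
# The reduce_sum / reduce_mean tests above early-return unless exactly two processes are used.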
91
"""simple docstring""" import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py UpperCAmelCase_ : Optional[int] = """src/transformers""" UpperCAmelCase_ : Tuple = """docs/source/en""" UpperCAmelCase_ : Optional[Any] = """.""" def _A (__a , __a , __a ) -> Dict: """simple docstring""" with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Dict = f.readlines() # Find the start prompt. SCREAMING_SNAKE_CASE_ : List[Any] = 0 while not lines[start_index].startswith(__a ): start_index += 1 start_index += 1 SCREAMING_SNAKE_CASE_ : Tuple = start_index while not lines[end_index].startswith(__a ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | UpperCAmelCase_ : Optional[Any] = """Model|Encoder|Decoder|ForConditionalGeneration""" # Regexes that match TF/Flax/PT model names. UpperCAmelCase_ : int = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") UpperCAmelCase_ : Dict = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. UpperCAmelCase_ : int = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # This is to make sure the transformers module imported is the one in the repo. UpperCAmelCase_ : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH) def _A (__a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , __a ) return [m.group(0 ) for m in matches] def _A (__a , __a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = 2 if text == '''✅''' or text == '''❌''' else len(__a ) SCREAMING_SNAKE_CASE_ : Tuple = (width - text_length) // 2 SCREAMING_SNAKE_CASE_ : Tuple = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def _A () -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES SCREAMING_SNAKE_CASE_ : Tuple = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } SCREAMING_SNAKE_CASE_ : List[Any] = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. SCREAMING_SNAKE_CASE_ : List[str] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : int = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : List[str] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : int = collections.defaultdict(__a ) # Let's lookup through all transformers object (once). 
for attr_name in dir(__a ): SCREAMING_SNAKE_CASE_ : Any = None if attr_name.endswith('''Tokenizer''' ): SCREAMING_SNAKE_CASE_ : Dict = slow_tokenizers SCREAMING_SNAKE_CASE_ : Dict = attr_name[:-9] elif attr_name.endswith('''TokenizerFast''' ): SCREAMING_SNAKE_CASE_ : Optional[Any] = fast_tokenizers SCREAMING_SNAKE_CASE_ : Optional[Any] = attr_name[:-13] elif _re_tf_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : int = tf_models SCREAMING_SNAKE_CASE_ : Dict = _re_tf_models.match(__a ).groups()[0] elif _re_flax_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : Any = flax_models SCREAMING_SNAKE_CASE_ : Tuple = _re_flax_models.match(__a ).groups()[0] elif _re_pt_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : str = pt_models SCREAMING_SNAKE_CASE_ : int = _re_pt_models.match(__a ).groups()[0] if lookup_dict is not None: while len(__a ) > 0: if attr_name in model_name_to_prefix.values(): SCREAMING_SNAKE_CASE_ : List[str] = True break # Try again after removing the last word in the name SCREAMING_SNAKE_CASE_ : Optional[int] = ''''''.join(camel_case_split(__a )[:-1] ) # Let's build that table! SCREAMING_SNAKE_CASE_ : Any = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) SCREAMING_SNAKE_CASE_ : Any = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support'''] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). SCREAMING_SNAKE_CASE_ : List[str] = [len(__a ) + 2 for c in columns] SCREAMING_SNAKE_CASE_ : str = max([len(__a ) for name in model_names] ) + 2 # Build the table per se SCREAMING_SNAKE_CASE_ : List[Any] = '''|''' + '''|'''.join([_center_text(__a , __a ) for c, w in zip(__a , __a )] ) + '''|\n''' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n" SCREAMING_SNAKE_CASE_ : Union[str, Any] = {True: '''✅''', False: '''❌'''} for name in model_names: SCREAMING_SNAKE_CASE_ : str = model_name_to_prefix[name] SCREAMING_SNAKE_CASE_ : int = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(__a , __a ) for l, w in zip(__a , __a )] ) + "|\n" return table def _A (__a=False ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = _find_text_in_file( filename=os.path.join(__a , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , ) SCREAMING_SNAKE_CASE_ : Tuple = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(__a , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' ) if __name__ == "__main__": UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCAmelCase_ : Any = parser.parse_args() check_model_table(args.fix_and_overwrite)
91
1
"""simple docstring""" from __future__ import annotations def _A (__a , __a , __a ) -> tuple[float, list[float]]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = list(range(len(__a ) ) ) SCREAMING_SNAKE_CASE_ : Dict = [v / w for v, w in zip(__a , __a )] index.sort(key=lambda __a : ratio[i] , reverse=__a ) SCREAMING_SNAKE_CASE_ : float = 0 SCREAMING_SNAKE_CASE_ : list[float] = [0] * len(__a ) for i in index: if weight[i] <= capacity: SCREAMING_SNAKE_CASE_ : Any = 1 max_value += value[i] capacity -= weight[i] else: SCREAMING_SNAKE_CASE_ : List[Any] = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
91
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , lowercase_ : List[Any] , lowercase_ : List[str]=13 , lowercase_ : int=7 , lowercase_ : Any=True , lowercase_ : str=True , lowercase_ : List[Any]=True , lowercase_ : List[Any]=True , lowercase_ : Dict=99 , lowercase_ : Union[str, Any]=24 , lowercase_ : int=2 , lowercase_ : List[str]=6 , lowercase_ : Any=37 , lowercase_ : Dict="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Dict=0.1 , lowercase_ : Union[str, Any]=512 , lowercase_ : List[str]=16 , lowercase_ : Any=2 , lowercase_ : Any=0.02 , lowercase_ : List[Any]=3 , lowercase_ : Optional[int]=None , lowercase_ : str=1000 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = parent SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_size SCREAMING_SNAKE_CASE_ : Optional[Any] = seq_length SCREAMING_SNAKE_CASE_ : List[Any] = is_training SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask SCREAMING_SNAKE_CASE_ : Optional[Any] = use_token_type_ids SCREAMING_SNAKE_CASE_ : int = use_labels SCREAMING_SNAKE_CASE_ : List[Any] = vocab_size SCREAMING_SNAKE_CASE_ : List[str] = hidden_size SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE_ : List[str] = num_attention_heads SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size SCREAMING_SNAKE_CASE_ : Tuple = hidden_act SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE_ : Union[str, Any] = type_vocab_size SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size SCREAMING_SNAKE_CASE_ : Any = initializer_range SCREAMING_SNAKE_CASE_ : Optional[Any] = num_labels SCREAMING_SNAKE_CASE_ : Tuple = scope SCREAMING_SNAKE_CASE_ : Optional[int] = range_bbox def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 3] SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 1] SCREAMING_SNAKE_CASE_ : str = t if bbox[i, j, 2] < bbox[i, j, 0]: SCREAMING_SNAKE_CASE_ : List[str] = bbox[i, j, 2] SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 0] SCREAMING_SNAKE_CASE_ : List[str] = t SCREAMING_SNAKE_CASE_ : Tuple = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) SCREAMING_SNAKE_CASE_ : Union[str, Any] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) SCREAMING_SNAKE_CASE_ : List[str] = None 
SCREAMING_SNAKE_CASE_ : List[str] = None if self.use_labels: SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE_ : Any = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = LiltModel(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , bbox=lowercase_ , token_type_ids=lowercase_) SCREAMING_SNAKE_CASE_ : int = model(lowercase_ , bbox=lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE_ : Optional[Any] = LiltForTokenClassification(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Tuple = model( lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : str , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = LiltForQuestionAnswering(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Optional[int] = model( lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) : List[str] = config_and_inputs 
SCREAMING_SNAKE_CASE_ : str = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __UpperCamelCase = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : str): '''simple docstring''' return True def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = LiltModelTester(self) SCREAMING_SNAKE_CASE_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE_ : Dict = type self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase_) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Optional[int] = LiltModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) @require_torch @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''').to(lowercase_) SCREAMING_SNAKE_CASE_ : str = torch.tensor([[1, 2]] , device=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase_) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Dict = model(input_ids=lowercase_ , bbox=lowercase_) SCREAMING_SNAKE_CASE_ : str = torch.Size([1, 2, 768]) SCREAMING_SNAKE_CASE_ : Dict = torch.tensor( [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=lowercase_ , ) self.assertTrue(outputs.last_hidden_state.shape , lowercase_) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase_ , atol=1e-3))
91
1
"""simple docstring""" import sys from collections import defaultdict class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = [] def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[Any]): '''simple docstring''' return self.node_position[vertex] def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = pos def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : List[str] , lowercase_ : str , lowercase_ : str , lowercase_ : Union[str, Any]): '''simple docstring''' if start > size // 2 - 1: return else: if 2 * start + 2 >= size: SCREAMING_SNAKE_CASE_ : Any = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2 * start + 1 else: SCREAMING_SNAKE_CASE_ : List[Any] = 2 * start + 2 if heap[smallest_child] < heap[start]: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = heap[smallest_child], positions[smallest_child] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = ( heap[start], positions[start], ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = temp, tempa SCREAMING_SNAKE_CASE_ : str = self.get_position(positions[smallest_child]) self.set_position( positions[smallest_child] , self.get_position(positions[start])) self.set_position(positions[start] , lowercase_) self.top_to_bottom(lowercase_ , lowercase_ , lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = position[index] while index != 0: SCREAMING_SNAKE_CASE_ : int = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: SCREAMING_SNAKE_CASE_ : int = heap[parent] SCREAMING_SNAKE_CASE_ : List[Any] = position[parent] self.set_position(position[parent] , lowercase_) else: SCREAMING_SNAKE_CASE_ : Union[str, Any] = val SCREAMING_SNAKE_CASE_ : Optional[Any] = temp self.set_position(lowercase_ , lowercase_) break SCREAMING_SNAKE_CASE_ : Tuple = parent else: SCREAMING_SNAKE_CASE_ : Tuple = val SCREAMING_SNAKE_CASE_ : Union[str, Any] = temp self.set_position(lowercase_ , 0) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(lowercase_) // 2 - 1 for i in range(lowercase_ , -1 , -1): self.top_to_bottom(lowercase_ , lowercase_ , len(lowercase_) , lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Dict , lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = positions[0] SCREAMING_SNAKE_CASE_ : Optional[Any] = sys.maxsize self.top_to_bottom(lowercase_ , 0 , len(lowercase_) , lowercase_) return temp def _A (__a ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = Heap() SCREAMING_SNAKE_CASE_ : List[Any] = [0] * len(__a ) SCREAMING_SNAKE_CASE_ : List[str] = [-1] * len(__a ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph SCREAMING_SNAKE_CASE_ : Optional[int] = [] # Heap of Distance of vertices from their neighboring vertex SCREAMING_SNAKE_CASE_ : List[str] = [] for vertex in range(len(__a ) ): distance_tv.append(sys.maxsize ) positions.append(__a ) heap.node_position.append(__a ) SCREAMING_SNAKE_CASE_ : int = [] SCREAMING_SNAKE_CASE_ : 
str = 1 SCREAMING_SNAKE_CASE_ : Union[str, Any] = sys.maxsize for neighbor, distance in adjacency_list[0]: SCREAMING_SNAKE_CASE_ : Optional[int] = 0 SCREAMING_SNAKE_CASE_ : Any = distance heap.heapify(__a , __a ) for _ in range(1 , len(__a ) ): SCREAMING_SNAKE_CASE_ : Tuple = heap.delete_minimum(__a , __a ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) SCREAMING_SNAKE_CASE_ : str = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(__a )] ): SCREAMING_SNAKE_CASE_ : Optional[int] = distance heap.bottom_to_top( __a , heap.get_position(__a ) , __a , __a ) SCREAMING_SNAKE_CASE_ : Dict = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > UpperCAmelCase_ : List[str] = int(input("""Enter number of edges: """).strip()) UpperCAmelCase_ : Optional[int] = defaultdict(list) for _ in range(edges_number): UpperCAmelCase_ : str = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
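# Usage sketch (not part of the original script; the expected output is an assumption based
# on the code above): prisms_algorithm takes a mapping from vertex index to a list of
# [neighbor, weight] pairs, with vertices numbered from 0, and returns the MST edges as
# (parent, child) tuples.
#
#   adjacency_list = {
#       0: [[1, 1], [3, 3]],
#       1: [[0, 1], [2, 6], [3, 5], [4, 1]],
#       2: [[1, 6], [4, 5], [5, 2]],
#       3: [[0, 3], [1, 5], [4, 1]],
#       4: [[1, 1], [3, 1], [2, 5], [5, 4]],
#       5: [[2, 2], [4, 4]],
#   }
#   prisms_algorithm(adjacency_list)  # expected: [(0, 1), (1, 4), (4, 3), (4, 5), (5, 2)]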
91
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) UpperCAmelCase_ : Dict = logging.getLogger(__name__) if __name__ == "__main__": UpperCAmelCase_ : List[str] = argparse.ArgumentParser( description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)""" ) parser.add_argument( """--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset.""" ) parser.add_argument( """--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file.""" ) parser.add_argument("""--vocab_size""", default=30522, type=int) UpperCAmelCase_ : Optional[Any] = parser.parse_args() logger.info(f'''Loading data from {args.data_file}''') with open(args.data_file, """rb""") as fp: UpperCAmelCase_ : Union[str, Any] = pickle.load(fp) logger.info("""Counting occurrences for MLM.""") UpperCAmelCase_ : Any = Counter() for tk_ids in data: counter.update(tk_ids) UpperCAmelCase_ : List[Any] = [0] * args.vocab_size for k, v in counter.items(): UpperCAmelCase_ : Dict = v logger.info(f'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, """wb""") as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
91
1
"""simple docstring""" from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": UpperCAmelCase_ : Tuple = input("""Enter image url: """).strip() print(f'''Downloading image from {url} ...''') UpperCAmelCase_ : int = BeautifulSoup(requests.get(url).content, """html.parser""") # The image URL is in the content field of the first meta tag with property og:image UpperCAmelCase_ : List[Any] = soup.find("""meta""", {"""property""": """og:image"""})["""content"""] UpperCAmelCase_ : List[Any] = requests.get(image_url).content UpperCAmelCase_ : str = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg''' with open(file_name, """wb""") as fp: fp.write(image_data) print(f'''Done. Image saved to disk as {file_name}.''')
91
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) def _A (__a , __a ) -> Tuple: """simple docstring""" try: with open(__a , '''rb''' ) as flax_state_f: SCREAMING_SNAKE_CASE_ : Optional[int] = from_bytes(__a , flax_state_f.read() ) except UnpicklingError as e: try: with open(__a ) as f: if f.read().startswith('''version''' ): raise OSError( '''You seem to have cloned a repository without having git-lfs installed. Please''' ''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the''' ''' folder you cloned.''' ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(__a , __a ) def _A (__a , __a ) -> Tuple: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights SCREAMING_SNAKE_CASE_ : Optional[int] = flatten_dict(jax.tree_util.tree_map(lambda __a : x.dtype == jnp.bfloataa , __a ) ).values() if any(__a ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = jax.tree_util.tree_map( lambda __a : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __a ) SCREAMING_SNAKE_CASE_ : int = '''''' SCREAMING_SNAKE_CASE_ : str = flatten_dict(__a , sep='''.''' ) SCREAMING_SNAKE_CASE_ : List[Any] = pt_model.state_dict() # keep track of unexpected & missing keys SCREAMING_SNAKE_CASE_ : str = [] SCREAMING_SNAKE_CASE_ : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): SCREAMING_SNAKE_CASE_ : Any = flax_key_tuple.split('''.''' ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: SCREAMING_SNAKE_CASE_ : Any = flax_key_tuple_array[:-1] + ['''weight'''] SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.transpose(__a , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": SCREAMING_SNAKE_CASE_ : Tuple = flax_key_tuple_array[:-1] + ['''weight'''] SCREAMING_SNAKE_CASE_ : Optional[int] = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": SCREAMING_SNAKE_CASE_ : Optional[int] = flax_key_tuple_array[:-1] + ['''weight'''] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(__a ): SCREAMING_SNAKE_CASE_ : List[str] = ( flax_key_tuple_string.replace('''_0''' , '''.0''' ) .replace('''_1''' , '''.1''' ) .replace('''_2''' , '''.2''' ) .replace('''_3''' , '''.3''' ) .replace('''_4''' , '''.4''' ) .replace('''_5''' , '''.5''' ) .replace('''_6''' , '''.6''' ) .replace('''_7''' , '''.7''' ) .replace('''_8''' , '''.8''' ) .replace('''_9''' , '''.9''' ) ) SCREAMING_SNAKE_CASE_ : Optional[Any] = '''.'''.join(__a ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f'Flax checkpoint seems to be 
incorrect. Weight {flax_key_tuple} was expected ' f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict SCREAMING_SNAKE_CASE_ : Optional[int] = np.asarray(__a ) if not isinstance(__a , np.ndarray ) else flax_tensor SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.from_numpy(__a ) # remove from missing keys missing_keys.remove(__a ) else: # weight is not expected by PyTorch model unexpected_keys.append(__a ) pt_model.load_state_dict(__a ) # re-transform missing_keys to list SCREAMING_SNAKE_CASE_ : int = list(__a ) if len(__a ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) if len(__a ) > 0: logger.warning( f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) return pt_model
91
1
"""simple docstring""" import requests UpperCAmelCase_ : str = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=""" def _A (__a ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = requests.get(_NEWS_API + bbc_news_api_key ).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page['''articles'''] , 1 ): print(f'{i}.) {article["title"]}' ) if __name__ == "__main__": fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
91
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase_ : Any = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "openai-gpt" __UpperCamelCase = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : List[str] , lowercase_ : List[str]=40478 , lowercase_ : List[str]=512 , lowercase_ : Optional[Any]=768 , lowercase_ : Tuple=12 , lowercase_ : Tuple=12 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : List[Any]=1e-5 , lowercase_ : int=0.02 , lowercase_ : Optional[int]="cls_index" , lowercase_ : Any=True , lowercase_ : List[Any]=None , lowercase_ : List[str]=True , lowercase_ : Optional[Any]=0.1 , **lowercase_ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = vocab_size SCREAMING_SNAKE_CASE_ : Tuple = n_positions SCREAMING_SNAKE_CASE_ : Optional[int] = n_embd SCREAMING_SNAKE_CASE_ : Dict = n_layer SCREAMING_SNAKE_CASE_ : Any = n_head SCREAMING_SNAKE_CASE_ : Union[str, Any] = afn SCREAMING_SNAKE_CASE_ : int = resid_pdrop SCREAMING_SNAKE_CASE_ : List[str] = embd_pdrop SCREAMING_SNAKE_CASE_ : Union[str, Any] = attn_pdrop SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_epsilon SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range SCREAMING_SNAKE_CASE_ : List[str] = summary_type SCREAMING_SNAKE_CASE_ : Tuple = summary_use_proj SCREAMING_SNAKE_CASE_ : Union[str, Any] = summary_activation SCREAMING_SNAKE_CASE_ : Any = summary_first_dropout SCREAMING_SNAKE_CASE_ : List[str] = summary_proj_to_labels super().__init__(**lowercase_)
91
1
"""simple docstring""" import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging UpperCAmelCase_ : str = logging.get_logger(__name__) def _A (__a=None , __a=None ) -> int: """simple docstring""" return field(default_factory=lambda: default , metadata=__a ) @dataclass class lowerCAmelCase__ : '''simple docstring''' __UpperCamelCase = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) __UpperCamelCase = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) __UpperCamelCase = list_field( default=[8, 3_2, 1_2_8, 5_1_2] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) __UpperCamelCase = field( default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) __UpperCamelCase = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) __UpperCamelCase = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) __UpperCamelCase = field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} ) __UpperCamelCase = field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} ) __UpperCamelCase = field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} ) __UpperCamelCase = field( default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) __UpperCamelCase = field( default=UpperCAmelCase__ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) __UpperCamelCase = field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} ) __UpperCamelCase = field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} ) __UpperCamelCase = field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} ) __UpperCamelCase = field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} ) __UpperCamelCase = field( default=UpperCAmelCase__ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) __UpperCamelCase = field( default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , ) __UpperCamelCase = field( default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , ) __UpperCamelCase = field( default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) __UpperCamelCase = field( default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) __UpperCamelCase = field( default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , ) __UpperCamelCase = field( default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , ) __UpperCamelCase = field(default=3 , metadata={"help": "Times an experiment will be run."} ) __UpperCamelCase = field( default=UpperCAmelCase__ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' warnings.warn( F'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils' ''' are deprecated in general and it is advised to use external Benchmarking libraries ''' ''' to benchmark Transformer models.''' , lowercase_ , ) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' return json.dumps(dataclasses.asdict(self) , indent=2) @property def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' if len(self.models) <= 0: raise ValueError( '''Please make sure you provide at least one model name / model identifier, *e.g.* `--models''' ''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''') return self.models @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info('''Multiprocessing is currently not possible on TPU.''') return False else: return True
91
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[str] , *lowercase_ : Dict , **lowercase_ : Union[str, Any]): '''simple docstring''' warnings.warn( '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DeiTImageProcessor instead.''' , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_)
91
1
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = (PNDMScheduler,) __UpperCamelCase = (("num_inference_steps", 5_0),) def _SCREAMING_SNAKE_CASE ( self : Any , **lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = { '''num_train_timesteps''': 1000, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**lowercase_) return config def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[str]=0 , **lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('''num_inference_steps''' , lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_sample SCREAMING_SNAKE_CASE_ : List[Any] = 0.1 * sample SCREAMING_SNAKE_CASE_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config(**lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # copy over dummy past residuals SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class.from_pretrained(lowercase_) new_scheduler.set_timesteps(lowercase_) # copy over dummy past residuals SCREAMING_SNAKE_CASE_ : Optional[Any] = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str]=0 , **lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop('''num_inference_steps''' , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = 0.1 * sample SCREAMING_SNAKE_CASE_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # copy over dummy past residuals (must be after setting timesteps) SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_) SCREAMING_SNAKE_CASE_ : str = scheduler_class.from_pretrained(lowercase_) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_) # copy over dummy past residual (must be after setting 
timesteps) SCREAMING_SNAKE_CASE_ : Any = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Tuple = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : str , **lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_scheduler_config(**lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class(**lowercase_) SCREAMING_SNAKE_CASE_ : Dict = 10 SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_model() SCREAMING_SNAKE_CASE_ : str = self.dummy_sample_deter scheduler.set_timesteps(lowercase_) for i, t in enumerate(scheduler.prk_timesteps): SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : str = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample for i, t in enumerate(scheduler.plms_timesteps): SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_).prev_sample return sample def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''num_inference_steps''' , lowercase_) for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler_class(**lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_sample SCREAMING_SNAKE_CASE_ : Any = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase_ , '''set_timesteps'''): scheduler.set_timesteps(lowercase_) elif num_inference_steps is not None and not hasattr(lowercase_ , '''set_timesteps'''): SCREAMING_SNAKE_CASE_ : Optional[Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) SCREAMING_SNAKE_CASE_ : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] SCREAMING_SNAKE_CASE_ : Optional[int] = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Dict = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : List[Any] = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Any = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' for steps_offset in [0, 1]: 
self.check_over_configs(steps_offset=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : List[str] = self.get_scheduler_config(steps_offset=1) SCREAMING_SNAKE_CASE_ : Tuple = scheduler_class(**lowercase_) scheduler.set_timesteps(10) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , ) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02]): self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' for t in [1, 5, 10]: self.check_over_forward(time_step=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]): self.check_over_forward(num_inference_steps=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = 27 for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_sample SCREAMING_SNAKE_CASE_ : str = 0.1 * sample SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2]): SCREAMING_SNAKE_CASE_ : int = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' with self.assertRaises(lowercase_): SCREAMING_SNAKE_CASE_ : int = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : List[str] = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Dict = scheduler_class(**lowercase_) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.full_loop() SCREAMING_SNAKE_CASE_ : List[Any] = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 1_98.13_18) < 1e-2 assert abs(result_mean.item() - 0.25_80) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.full_loop(prediction_type='''v_prediction''') SCREAMING_SNAKE_CASE_ : str = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 67.39_86) < 1e-2 assert abs(result_mean.item() - 0.08_78) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01) SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 2_30.03_99) < 1e-2 assert abs(result_mean.item() - 0.29_95) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' 
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01) SCREAMING_SNAKE_CASE_ : int = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : List[str] = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 1_86.94_82) < 1e-2 assert abs(result_mean.item() - 0.24_34) < 1e-3
91
"""simple docstring""" import random from typing import Any def _A (__a ) -> list[Any]: """simple docstring""" for _ in range(len(__a ) ): SCREAMING_SNAKE_CASE_ : Optional[int] = random.randint(0 , len(__a ) - 1 ) SCREAMING_SNAKE_CASE_ : Tuple = random.randint(0 , len(__a ) - 1 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = data[b], data[a] return data if __name__ == "__main__": UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5, 6, 7] UpperCAmelCase_ : Dict = ["""python""", """says""", """hello""", """!"""] print("""Fisher-Yates Shuffle:""") print("""List""", integers, strings) print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
91
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase_ : Any = { """configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""], """configuration_data2vec_text""": [ """DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecTextConfig""", """Data2VecTextOnnxConfig""", ], """configuration_data2vec_vision""": [ """DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecVisionConfig""", """Data2VecVisionOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Optional[int] = [ """DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecAudioForAudioFrameClassification""", """Data2VecAudioForCTC""", """Data2VecAudioForSequenceClassification""", """Data2VecAudioForXVector""", """Data2VecAudioModel""", """Data2VecAudioPreTrainedModel""", ] UpperCAmelCase_ : Union[str, Any] = [ """DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecTextForCausalLM""", """Data2VecTextForMaskedLM""", """Data2VecTextForMultipleChoice""", """Data2VecTextForQuestionAnswering""", """Data2VecTextForSequenceClassification""", """Data2VecTextForTokenClassification""", """Data2VecTextModel""", """Data2VecTextPreTrainedModel""", ] UpperCAmelCase_ : Optional[Any] = [ """DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecVisionForImageClassification""", """Data2VecVisionForMaskedImageModeling""", """Data2VecVisionForSemanticSegmentation""", """Data2VecVisionModel""", """Data2VecVisionPreTrainedModel""", ] if is_tf_available(): UpperCAmelCase_ : Any = [ """TFData2VecVisionForImageClassification""", """TFData2VecVisionForSemanticSegmentation""", """TFData2VecVisionModel""", """TFData2VecVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys UpperCAmelCase_ : List[Any] = _LazyModule(__name__, 
globals()["""__file__"""], _import_structure, module_spec=__spec__)
91
"""simple docstring""" import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def _A (__a , __a , __a ) -> Dict: """simple docstring""" if gpta_config_file == "": SCREAMING_SNAKE_CASE_ : Optional[Any] = GPTaConfig() else: SCREAMING_SNAKE_CASE_ : Tuple = GPTaConfig.from_json_file(__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = GPTaModel(__a ) # Load weights from numpy load_tf_weights_in_gpta(__a , __a , __a ) # Save pytorch-model SCREAMING_SNAKE_CASE_ : List[str] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME SCREAMING_SNAKE_CASE_ : List[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(f'Save PyTorch model to {pytorch_weights_dump_path}' ) torch.save(model.state_dict() , __a ) print(f'Save configuration file to {pytorch_config_dump_path}' ) with open(__a , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCAmelCase_ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--gpt2_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained OpenAI model. \n""" """This specifies the model architecture.""" ), ) UpperCAmelCase_ : Union[str, Any] = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
91
1
"""simple docstring""" import random from .binary_exp_mod import bin_exp_mod def _A (__a , __a=10_00 ) -> int: """simple docstring""" if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd SCREAMING_SNAKE_CASE_ : Optional[Any] = n - 1 SCREAMING_SNAKE_CASE_ : int = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) SCREAMING_SNAKE_CASE_ : Optional[Any] = 0 while count < prec: SCREAMING_SNAKE_CASE_ : int = random.randint(2 , n - 1 ) SCREAMING_SNAKE_CASE_ : List[Any] = bin_exp_mod(__a , __a , __a ) if b != 1: SCREAMING_SNAKE_CASE_ : Dict = True for _ in range(__a ): if b == n - 1: SCREAMING_SNAKE_CASE_ : Union[str, Any] = False break SCREAMING_SNAKE_CASE_ : Optional[Any] = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": UpperCAmelCase_ : List[Any] = abs(int(input("""Enter bound : """).strip())) print("""Here's the list of primes:""") print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
91
"""simple docstring""" from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
91
1
"""simple docstring""" class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' pass class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' pass class lowerCAmelCase__ : '''simple docstring''' def __init__( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = [ [], [], [], ] def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : int , lowercase_ : int): '''simple docstring''' try: if len(self.queues[priority]) >= 100: raise OverflowError('''Maximum queue size is 100''') self.queues[priority].append(lowercase_) except IndexError: raise ValueError('''Valid priorities are 0, 1, and 2''') def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' for queue in self.queues: if queue: return queue.pop(0) raise UnderFlowError('''All queues are empty''') def __str__( self : List[str]): '''simple docstring''' return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues)) class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : int): '''simple docstring''' if len(self.queue) == 100: raise OverFlowError('''Maximum queue size is 100''') self.queue.append(lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' if not self.queue: raise UnderFlowError('''The queue is empty''') else: SCREAMING_SNAKE_CASE_ : Any = min(self.queue) self.queue.remove(lowercase_) return data def __str__( self : int): '''simple docstring''' return str(self.queue) def _A () -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = FixedPriorityQueue() fpq.enqueue(0 , 10 ) fpq.enqueue(1 , 70 ) fpq.enqueue(0 , 1_00 ) fpq.enqueue(2 , 1 ) fpq.enqueue(2 , 5 ) fpq.enqueue(1 , 7 ) fpq.enqueue(2 , 4 ) fpq.enqueue(1 , 64 ) fpq.enqueue(0 , 1_28 ) print(__a ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(__a ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) def _A () -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = ElementPriorityQueue() epq.enqueue(10 ) epq.enqueue(70 ) epq.enqueue(1_00 ) epq.enqueue(1 ) epq.enqueue(5 ) epq.enqueue(7 ) epq.enqueue(4 ) epq.enqueue(64 ) epq.enqueue(1_28 ) print(__a ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(__a ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) if __name__ == "__main__": fixed_priority_queue() element_priority_queue()
91
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL UpperCAmelCase_ : int = logging.get_logger(__name__) def _A (__a ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(__a , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__a , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__a ): return [[videos]] raise ValueError(f'Could not make batched video from {videos}' ) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["pixel_values"] def __init__( self : Dict , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : Dict , ): '''simple docstring''' super().__init__(**lowercase_) SCREAMING_SNAKE_CASE_ : str = size if size is not None else {'''shortest_edge''': 256} SCREAMING_SNAKE_CASE_ : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(lowercase_ , param_name='''crop_size''') SCREAMING_SNAKE_CASE_ : Optional[int] = do_resize SCREAMING_SNAKE_CASE_ : List[Any] = size SCREAMING_SNAKE_CASE_ : Tuple = do_center_crop SCREAMING_SNAKE_CASE_ : Dict = crop_size SCREAMING_SNAKE_CASE_ : List[Any] = resample SCREAMING_SNAKE_CASE_ : List[str] = do_rescale SCREAMING_SNAKE_CASE_ : List[str] = rescale_factor SCREAMING_SNAKE_CASE_ : List[Any] = offset SCREAMING_SNAKE_CASE_ : List[Any] = do_normalize SCREAMING_SNAKE_CASE_ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN SCREAMING_SNAKE_CASE_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_) if "shortest_edge" in size: SCREAMING_SNAKE_CASE_ : List[Any] = get_resize_output_image_size(lowercase_ , size['''shortest_edge'''] , default_to_square=lowercase_) elif "height" in size and "width" in size: SCREAMING_SNAKE_CASE_ : Union[str, Any] = (size['''height'''], size['''width''']) else: raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}') return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = get_size_dict(lowercase_) if "height" not in size or "width" not in size: raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}') return center_crop(lowercase_ , size=(size['''height'''], size['''width''']) , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : bool = True , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[int] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = image.astype(np.floataa) if offset: SCREAMING_SNAKE_CASE_ : Tuple = image - (scale / 2) return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[str] , ): '''simple docstring''' return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ): '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''') if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''') if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''') # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE_ : List[str] = to_numpy_array(lowercase_) if do_resize: SCREAMING_SNAKE_CASE_ : List[Any] = self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) if do_center_crop: SCREAMING_SNAKE_CASE_ : Dict = self.center_crop(lowercase_ , size=lowercase_) if do_rescale: SCREAMING_SNAKE_CASE_ : int = self.rescale(image=lowercase_ , scale=lowercase_ , offset=lowercase_) if do_normalize: SCREAMING_SNAKE_CASE_ : Dict = self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = to_channel_dimension_format(lowercase_ , lowercase_) return image def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Optional[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE_ : int = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE_ : Dict = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE_ : Dict = offset if offset is not None else self.offset SCREAMING_SNAKE_CASE_ : str = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE_ : Dict = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE_ : List[str] = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else self.size SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : Any = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(lowercase_ , param_name='''crop_size''') if not valid_images(lowercase_): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') SCREAMING_SNAKE_CASE_ : Tuple = make_batched(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = [ [ self._preprocess_image( image=lowercase_ , do_resize=lowercase_ , size=lowercase_ , resample=lowercase_ , do_center_crop=lowercase_ , crop_size=lowercase_ , do_rescale=lowercase_ , rescale_factor=lowercase_ , offset=lowercase_ , do_normalize=lowercase_ , image_mean=lowercase_ , image_std=lowercase_ , data_format=lowercase_ , ) for img in video ] for video in videos ] SCREAMING_SNAKE_CASE_ : int = {'''pixel_values''': videos} return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
91
1
"""simple docstring""" import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class lowerCAmelCase__ : '''simple docstring''' def __init__( self : int , lowercase_ : List[str] , lowercase_ : int=None , lowercase_ : List[Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : List[Any]="resnet50" , lowercase_ : Dict=3 , lowercase_ : str=32 , lowercase_ : str=3 , lowercase_ : Tuple=True , lowercase_ : str=True , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = parent SCREAMING_SNAKE_CASE_ : Tuple = out_indices if out_indices is not None else [4] SCREAMING_SNAKE_CASE_ : Optional[int] = stage_names SCREAMING_SNAKE_CASE_ : Any = out_features SCREAMING_SNAKE_CASE_ : List[str] = backbone SCREAMING_SNAKE_CASE_ : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE_ : Optional[Any] = image_size SCREAMING_SNAKE_CASE_ : Dict = num_channels SCREAMING_SNAKE_CASE_ : Any = use_pretrained_backbone SCREAMING_SNAKE_CASE_ : Any = is_training def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_ : List[Any] = self.get_config() return config, pixel_values def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : Dict , lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = TimmBackbone(config=lowercase_) model.to(lowercase_) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE_ : List[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch @require_timm class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = (TimmBackbone,) if is_torch_available() else () __UpperCamelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {} __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = TimmBackboneModelTester(self) SCREAMING_SNAKE_CASE_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' 
self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''resnet18''' SCREAMING_SNAKE_CASE_ : List[Any] = '''microsoft/resnet-18''' SCREAMING_SNAKE_CASE_ : str = AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_) SCREAMING_SNAKE_CASE_ : str = AutoBackbone.from_pretrained(lowercase_) self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features)) self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names)) self.assertEqual(timm_model.channels , transformers_model.channels) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,)) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1]) SCREAMING_SNAKE_CASE_ : Optional[int] = AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ , out_indices=[1, 2, 3]) SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoBackbone.from_pretrained(lowercase_ , out_indices=[1, 2, 3]) self.assertEqual(timm_model.out_indices , transformers_model.out_indices) self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features)) self.assertEqual(timm_model.channels , transformers_model.channels) @unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''') def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' pass @unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''') def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' pass @unittest.skip('''TimmBackbone initialization is managed on the timm side''') def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''') def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''') def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' pass @unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''') def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''') def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''') def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''') def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''') def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''') def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' pass @unittest.skip('''TimmBackbone doesn\'t have hidden size info in its 
configuration.''') def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' pass @unittest.skip('''TimmBackbone doesn\'t support output_attentions.''') def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' pass @unittest.skip('''Safetensors is not supported by timm.''') def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''') def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ : Optional[Any] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ : Optional[int] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : List[str] = True SCREAMING_SNAKE_CASE_ : Optional[Any] = self.has_attentions # no need to test all models as different heads yield the same functionality SCREAMING_SNAKE_CASE_ : Optional[int] = self.all_model_classes[0] SCREAMING_SNAKE_CASE_ : List[Any] = model_class(lowercase_) model.to(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = self._prepare_for_class(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = model(**lowercase_) SCREAMING_SNAKE_CASE_ : int = outputs[0][-1] # Encoder-/Decoder-only models SCREAMING_SNAKE_CASE_ : List[str] = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: SCREAMING_SNAKE_CASE_ : Any = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=lowercase_) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : int = model_class(lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : int = model(**lowercase_) self.assertEqual(len(result.feature_maps) , len(config.out_indices)) self.assertEqual(len(model.channels) , len(config.out_indices)) # Check output of last stage is taken if out_features=None, out_indices=None SCREAMING_SNAKE_CASE_ : List[str] = copy.deepcopy(lowercase_) SCREAMING_SNAKE_CASE_ : int = None SCREAMING_SNAKE_CASE_ : Any = model_class(lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(**lowercase_) self.assertEqual(len(result.feature_maps) , 1) self.assertEqual(len(model.channels) , 1) # Check backbone can be initialized with fresh weights SCREAMING_SNAKE_CASE_ : int = copy.deepcopy(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = False SCREAMING_SNAKE_CASE_ : Any = model_class(lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Dict = model(**lowercase_)
91
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase_ : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCAmelCase_ : Dict = { """vocab_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""", }, """merges_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""", }, """tokenizer_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""", }, } UpperCAmelCase_ : List[str] = { """gpt2""": 1024, """gpt2-medium""": 1024, """gpt2-large""": 1024, """gpt2-xl""": 1024, """distilgpt2""": 1024, } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ["input_ids", "attention_mask"] __UpperCamelCase = GPTaTokenizer def __init__( self : Optional[int] , lowercase_ : int=None , lowercase_ : List[str]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Tuple="<|endoftext|>" , lowercase_ : str="<|endoftext|>" , lowercase_ : Dict="<|endoftext|>" , lowercase_ : Tuple=False , **lowercase_ : Optional[int] , ): '''simple docstring''' super().__init__( lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('''add_bos_token''' , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('''add_prefix_space''' , lowercase_) != add_prefix_space: SCREAMING_SNAKE_CASE_ : int = getattr(lowercase_ , pre_tok_state.pop('''type''')) SCREAMING_SNAKE_CASE_ : str = add_prefix_space SCREAMING_SNAKE_CASE_ : Dict = pre_tok_class(**lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = add_prefix_space def _SCREAMING_SNAKE_CASE ( self : str , *lowercase_ : List[Any] , **lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = 
kwargs.get('''is_split_into_words''' , lowercase_) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *lowercase_ : List[str] , **lowercase_ : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = kwargs.get('''is_split_into_words''' , lowercase_) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , lowercase_ : Optional[str] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self._tokenizer.model.save(lowercase_ , name=lowercase_) return tuple(lowercase_) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : "Conversation"): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_) + [self.eos_token_id]) if len(lowercase_) > self.model_max_length: SCREAMING_SNAKE_CASE_ : Any = input_ids[-self.model_max_length :] return input_ids
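A short sketch of the add_prefix_space requirement that the assertions above enforce; it assumes the public gpt2 checkpoint is available.

from transformers import GPT2TokenizerFast

# Pre-tokenized input (is_split_into_words=True) is only allowed when the tokenizer
# was instantiated with add_prefix_space=True, per the assertions above.
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
encoding = tokenizer(["Hello", "world"], is_split_into_words=True)
print(encoding.input_ids)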
91
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = CycleDiffusionPipeline __UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "negative_prompt", "height", "width", "negative_prompt_embeds", } __UpperCamelCase = PipelineTesterMixin.required_optional_params - {"latents"} __UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} ) __UpperCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS __UpperCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) SCREAMING_SNAKE_CASE_ : List[str] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , num_train_timesteps=1000 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : Dict = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) SCREAMING_SNAKE_CASE_ : int = CLIPTextModel(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''') SCREAMING_SNAKE_CASE_ : Dict = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any]=0): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_)).to(lowercase_) SCREAMING_SNAKE_CASE_ : str = image / 2 + 0.5 if str(lowercase_).startswith('''mps'''): SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(lowercase_) else: SCREAMING_SNAKE_CASE_ : List[str] = torch.Generator(device=lowercase_).manual_seed(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = { '''prompt''': '''An astronaut riding an elephant''', '''source_prompt''': '''An astronaut riding a horse''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''eta''': 0.1, '''strength''': 0.8, '''guidance_scale''': 3, 
'''source_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ : List[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE_ : List[str] = CycleDiffusionPipeline(**lowercase_) SCREAMING_SNAKE_CASE_ : str = pipe.to(lowercase_) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = pipe(**lowercase_) SCREAMING_SNAKE_CASE_ : int = output.images SCREAMING_SNAKE_CASE_ : Union[str, Any] = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE_ : Dict = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''') def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.get_dummy_components() for name, module in components.items(): if hasattr(lowercase_ , '''half'''): SCREAMING_SNAKE_CASE_ : Union[str, Any] = module.half() SCREAMING_SNAKE_CASE_ : Tuple = CycleDiffusionPipeline(**lowercase_) SCREAMING_SNAKE_CASE_ : str = pipe.to(lowercase_) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : int = self.get_dummy_inputs(lowercase_) SCREAMING_SNAKE_CASE_ : Any = pipe(**lowercase_) SCREAMING_SNAKE_CASE_ : int = output.images SCREAMING_SNAKE_CASE_ : List[Any] = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE_ : int = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' return super().test_save_load_local() @unittest.skip('''non-deterministic pipeline''') def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' return super().test_inference_batch_single_identical() @skip_mps def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' return super().test_dict_tuple_outputs_equivalent() @skip_mps def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return super().test_save_load_optional_components() @skip_mps def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/cycle-diffusion/black_colored_car.png''') SCREAMING_SNAKE_CASE_ : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''') SCREAMING_SNAKE_CASE_ : List[str] = init_image.resize((512, 512)) SCREAMING_SNAKE_CASE_ : Optional[int] = '''CompVis/stable-diffusion-v1-4''' SCREAMING_SNAKE_CASE_ : int = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''') SCREAMING_SNAKE_CASE_ : List[str] = CycleDiffusionPipeline.from_pretrained( lowercase_ , 
scheduler=lowercase_ , safety_checker=lowercase_ , torch_dtype=torch.floataa , revision='''fp16''') pipe.to(lowercase_) pipe.set_progress_bar_config(disable=lowercase_) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE_ : Any = '''A black colored car''' SCREAMING_SNAKE_CASE_ : Optional[Any] = '''A blue colored car''' SCREAMING_SNAKE_CASE_ : str = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : Tuple = pipe( prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : Optional[int] = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image).max() < 5e-1 def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/cycle-diffusion/black_colored_car.png''') SCREAMING_SNAKE_CASE_ : Any = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''') SCREAMING_SNAKE_CASE_ : List[Any] = init_image.resize((512, 512)) SCREAMING_SNAKE_CASE_ : Tuple = '''CompVis/stable-diffusion-v1-4''' SCREAMING_SNAKE_CASE_ : List[str] = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''') SCREAMING_SNAKE_CASE_ : Tuple = CycleDiffusionPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_) pipe.to(lowercase_) pipe.set_progress_bar_config(disable=lowercase_) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE_ : Optional[int] = '''A black colored car''' SCREAMING_SNAKE_CASE_ : Dict = '''A blue colored car''' SCREAMING_SNAKE_CASE_ : List[Any] = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe( prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : Tuple = output.images assert np.abs(image - expected_image).max() < 2e-2
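A condensed sketch of the end-to-end call the slow tests above exercise; the checkpoint, image URL and argument values are the ones the tests pass, and running it requires a CUDA GPU.

import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/cycle-diffusion/black_colored_car.png"
).resize((512, 512))
scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", scheduler=scheduler, safety_checker=None
).to("cuda")
pipe.enable_attention_slicing()

output = pipe(
    prompt="A blue colored car",
    source_prompt="A black colored car",
    image=init_image,
    num_inference_steps=100,
    eta=0.1,
    strength=0.85,
    guidance_scale=3,
    source_guidance_scale=1,
    generator=torch.manual_seed(0),
    output_type="np",
)
print(output.images.shape)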
91
"""simple docstring""" import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(lowercase_ , '''hidden_sizes''')) self.parent.assertTrue(hasattr(lowercase_ , '''num_attention_heads''')) class lowerCAmelCase__ : '''simple docstring''' def __init__( self : str , lowercase_ : Union[str, Any] , lowercase_ : List[Any]=13 , lowercase_ : Dict=64 , lowercase_ : Dict=3 , lowercase_ : Optional[Any]=3 , lowercase_ : List[Any]=2 , lowercase_ : Any=1 , lowercase_ : List[Any]=16 , lowercase_ : int=[128, 256, 384] , lowercase_ : str=[4, 6, 8] , lowercase_ : Optional[Any]=[2, 3, 4] , lowercase_ : Union[str, Any]=[16, 16, 16] , lowercase_ : Optional[Any]=0 , lowercase_ : Optional[int]=[2, 2, 2] , lowercase_ : Any=[2, 2, 2] , lowercase_ : List[str]=0.02 , lowercase_ : Any=True , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[int]=2 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = parent SCREAMING_SNAKE_CASE_ : Any = batch_size SCREAMING_SNAKE_CASE_ : Optional[Any] = image_size SCREAMING_SNAKE_CASE_ : int = num_channels SCREAMING_SNAKE_CASE_ : List[Any] = kernel_size SCREAMING_SNAKE_CASE_ : Optional[Any] = stride SCREAMING_SNAKE_CASE_ : List[str] = padding SCREAMING_SNAKE_CASE_ : int = hidden_sizes SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads SCREAMING_SNAKE_CASE_ : int = depths SCREAMING_SNAKE_CASE_ : Optional[Any] = key_dim SCREAMING_SNAKE_CASE_ : Optional[Any] = drop_path_rate SCREAMING_SNAKE_CASE_ : Tuple = patch_size SCREAMING_SNAKE_CASE_ : Optional[Any] = attention_ratio SCREAMING_SNAKE_CASE_ : str = mlp_ratio SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range SCREAMING_SNAKE_CASE_ : List[Any] = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] SCREAMING_SNAKE_CASE_ : Any = is_training SCREAMING_SNAKE_CASE_ : Tuple = use_labels SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_labels SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_ : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , self.num_labels) SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : 
Optional[int]): '''simple docstring''' return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : int , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = LevitModel(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(lowercase_) SCREAMING_SNAKE_CASE_ : Any = (self.image_size, self.image_size) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = image_size[0], image_size[1] for _ in range(4): SCREAMING_SNAKE_CASE_ : List[Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1) SCREAMING_SNAKE_CASE_ : Dict = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]) , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.num_labels SCREAMING_SNAKE_CASE_ : Union[str, Any] = LevitForImageClassification(lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE_ : int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) __UpperCamelCase = ( { "feature-extraction": LevitModel, "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = LevitModelTester(self) SCREAMING_SNAKE_CASE_ : List[Any] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' return 
@unittest.skip(reason='''Levit does not use inputs_embeds''') def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' pass @unittest.skip(reason='''Levit does not support input and output embeddings''') def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' pass @unittest.skip(reason='''Levit does not output attentions''') def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Any = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ : Dict = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowercase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' def check_hidden_states_output(lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : str): SCREAMING_SNAKE_CASE_ : str = model_class(lowercase_) model.to(lowercase_) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Tuple = model(**self._prepare_for_class(lowercase_ , lowercase_)) SCREAMING_SNAKE_CASE_ : str = outputs.hidden_states SCREAMING_SNAKE_CASE_ : Optional[int] = len(self.model_tester.depths) + 1 self.assertEqual(len(lowercase_) , lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = (self.model_tester.image_size, self.model_tester.image_size) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_size[0], image_size[1] for _ in range(4): SCREAMING_SNAKE_CASE_ : Optional[Any] = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1) SCREAMING_SNAKE_CASE_ : Optional[int] = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [ height * width, self.model_tester.hidden_sizes[0], ] , ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Optional[int] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE_ : Tuple = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''') def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Tuple=False): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple 
docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : Union[str, Any] = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(lowercase_) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowercase_) model.to(lowercase_) model.train() SCREAMING_SNAKE_CASE_ : Optional[Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = model(**lowercase_).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE_ : Union[str, Any] = False SCREAMING_SNAKE_CASE_ : Optional[int] = True for model_class in self.all_model_classes: if model_class in get_values(lowercase_) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue SCREAMING_SNAKE_CASE_ : List[str] = model_class(lowercase_) model.gradient_checkpointing_enable() model.to(lowercase_) model.train() SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = model(**lowercase_).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : List[Any] = [ {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float}, {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long}, {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(lowercase_), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}'): SCREAMING_SNAKE_CASE_ : Optional[int] = problem_type['''title'''] SCREAMING_SNAKE_CASE_ : Optional[int] = problem_type['''num_labels'''] SCREAMING_SNAKE_CASE_ : str = model_class(lowercase_) model.to(lowercase_) model.train() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) if problem_type["num_labels"] > 1: SCREAMING_SNAKE_CASE_ : str = inputs['''labels'''].unsqueeze(1).repeat(1 , problem_type['''num_labels''']) SCREAMING_SNAKE_CASE_ : Any = inputs['''labels'''].to(problem_type['''dtype''']) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=lowercase_) as warning_list: SCREAMING_SNAKE_CASE_ : int = model(**lowercase_).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message): raise ValueError( F'Something is going wrong in the regression problem: intercepted {w.message}') loss.backward() @slow def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Optional[Any] = LevitModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) def _A () -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) @slow def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to( lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.default_image_processor SCREAMING_SNAKE_CASE_ : str = prepare_img() SCREAMING_SNAKE_CASE_ : List[Any] = image_processor(images=lowercase_ , return_tensors='''pt''').to(lowercase_) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Any = model(**lowercase_) # verify the logits SCREAMING_SNAKE_CASE_ : Tuple = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape , lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([1.04_48, -0.37_45, -1.83_17]).to(lowercase_) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
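A compact sketch of the integration test above; the checkpoint name is assumed to be the first entry of LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST (facebook/levit-128S on the Hub).

import torch
from PIL import Image
from transformers import LevitForImageClassificationWithTeacher, LevitImageProcessor

checkpoint = "facebook/levit-128S"  # assumed; the test reads it from the archive list
processor = LevitImageProcessor.from_pretrained(checkpoint)
model = LevitForImageClassificationWithTeacher.from_pretrained(checkpoint)

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # torch.Size([1, 1000])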
91
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase_ : List[Any] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["pixel_values"] def __init__( self : int , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = True , **lowercase_ : List[Any] , ): '''simple docstring''' super().__init__(**lowercase_) SCREAMING_SNAKE_CASE_ : Dict = size if size is not None else {'''shortest_edge''': 224} SCREAMING_SNAKE_CASE_ : List[Any] = get_size_dict(lowercase_ , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} SCREAMING_SNAKE_CASE_ : int = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name='''crop_size''') SCREAMING_SNAKE_CASE_ : Tuple = do_resize SCREAMING_SNAKE_CASE_ : Tuple = size SCREAMING_SNAKE_CASE_ : Optional[int] = resample SCREAMING_SNAKE_CASE_ : Optional[Any] = do_center_crop SCREAMING_SNAKE_CASE_ : Union[str, Any] = crop_size SCREAMING_SNAKE_CASE_ : str = do_rescale SCREAMING_SNAKE_CASE_ : Optional[Any] = rescale_factor SCREAMING_SNAKE_CASE_ : Tuple = do_normalize SCREAMING_SNAKE_CASE_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN SCREAMING_SNAKE_CASE_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD SCREAMING_SNAKE_CASE_ : Any = do_convert_rgb def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(lowercase_ , default_to_square=lowercase_) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}') SCREAMING_SNAKE_CASE_ : Any = get_resize_output_image_size(lowercase_ , size=size['''shortest_edge'''] , default_to_square=lowercase_) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = get_size_dict(lowercase_) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}') return center_crop(lowercase_ , size=(size['''height'''], size['''width''']) , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Dict , ): '''simple docstring''' return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ): '''simple docstring''' return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowercase_ : str , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE_ : int = size if size is not None else self.size SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(lowercase_ , param_name='''size''' , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : str = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE_ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE_ : Optional[Any] = get_size_dict(lowercase_ , param_name='''crop_size''' , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE_ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE_ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE_ : Optional[int] = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE_ : List[Any] = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE_ : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb SCREAMING_SNAKE_CASE_ : int = make_list_of_images(lowercase_) if not valid_images(lowercase_): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''') if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''') # PIL RGBA images are converted to RGB if do_convert_rgb: SCREAMING_SNAKE_CASE_ : Union[str, Any] = [convert_to_rgb(lowercase_) for image in images] # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE_ : Union[str, Any] = [to_numpy_array(lowercase_) for image in images] if do_resize: SCREAMING_SNAKE_CASE_ : List[str] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images] if do_center_crop: SCREAMING_SNAKE_CASE_ : str = [self.center_crop(image=lowercase_ , size=lowercase_) for image in images] if do_rescale: SCREAMING_SNAKE_CASE_ : List[str] = [self.rescale(image=lowercase_ , scale=lowercase_) for image in images] if do_normalize: SCREAMING_SNAKE_CASE_ : List[str] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images] SCREAMING_SNAKE_CASE_ : Optional[int] = [to_channel_dimension_format(lowercase_ , lowercase_) for image in images] SCREAMING_SNAKE_CASE_ : List[Any] = {'''pixel_values''': images} return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
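The preprocessing order implemented above (convert to RGB, resize, center-crop, rescale, normalize) is the standard CLIP-style pipeline. The concrete class name is not visible in this excerpt, so the sketch below uses CLIPImageProcessor, which exposes the same interface and defaults.

from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()  # defaults: shortest_edge=224 resize + 224x224 center crop
image = Image.new("RGB", (640, 480))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)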
91
"""simple docstring""" from math import factorial def _A (__a = 20 ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... SCREAMING_SNAKE_CASE_ : List[str] = n // 2 return int(factorial(__a ) / (factorial(__a ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(20)) else: try: UpperCAmelCase_ : List[str] = int(sys.argv[1]) print(solution(n)) except ValueError: print("""Invalid entry - please enter a number.""")
91
1
"""simple docstring""" from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
91
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor UpperCAmelCase_ : Any = logging.get_logger(__name__) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Union[str, Any] , *lowercase_ : List[str] , **lowercase_ : List[str]): '''simple docstring''' warnings.warn( '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use SegformerImageProcessor instead.''' , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_)
91
1
from __future__ import annotations import time import numpy as np UpperCAmelCase__ = [8, 5, 9, 7] UpperCAmelCase__ = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] UpperCAmelCase__ = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class lowercase_ : '''simple docstring''' def __init__( self : Optional[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[list[int]] , ) ->None: """simple docstring""" a = claim_vector a = allocated_resources_table a = maximum_claim_table def __lowerCAmelCase ( self : Any ) ->list[int]: """simple docstring""" return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def __lowerCAmelCase ( self : Optional[int] ) ->list[int]: """simple docstring""" return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def __lowerCAmelCase ( self : Union[str, Any] ) ->list[list[int]]: """simple docstring""" return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__UpperCAmelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def __lowerCAmelCase ( self : Tuple ) ->dict[int, list[int]]: """simple docstring""" return {self.__need().index(__UpperCAmelCase ): i for i in self.__need()} def __lowerCAmelCase ( self : Optional[Any] , **__UpperCAmelCase : Any ) ->None: """simple docstring""" a = self.__need() a = self.__allocated_resources_table a = self.__available_resources() a = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('''_''' * 50 + '''\n''' ) while need_list: a = False for each_need in need_list: a = True for index, need in enumerate(__UpperCAmelCase ): if need > available_resources[index]: a = False break if execution: a = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: a = original_need_index print(F"""Process {process_number + 1} is executing.""" ) # remove the process run from stack need_list.remove(__UpperCAmelCase ) # update available/freed resources stack a = np.array(__UpperCAmelCase ) + np.array( alloc_resources_table[process_number] ) print( '''Updated available resource stack for processes: ''' + ''' '''.join([str(__UpperCAmelCase ) for x in available_resources] ) ) break if safe: print('''The process is in a safe state.\n''' ) else: print('''System in unsafe state. Aborting...\n''' ) break def __lowerCAmelCase ( self : List[Any] ) ->Dict: """simple docstring""" print(''' ''' * 9 + '''Allocated Resource Table''' ) for item in self.__allocated_resources_table: print( F"""P{self.__allocated_resources_table.index(__UpperCAmelCase ) + 1}""" + ''' '''.join(F"""{it:>8}""" for it in item ) + '''\n''' ) print(''' ''' * 9 + '''System Resource Table''' ) for item in self.__maximum_claim_table: print( F"""P{self.__maximum_claim_table.index(__UpperCAmelCase ) + 1}""" + ''' '''.join(F"""{it:>8}""" for it in item ) + '''\n''' ) print( '''Current Usage by Active Processes: ''' + ''' '''.join(str(__UpperCAmelCase ) for x in self.__claim_vector ) ) print( '''Initial Available Resources: ''' + ''' '''.join(str(__UpperCAmelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
0
"""simple docstring""" from __future__ import annotations class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , lowercase_ : int = 0): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = key def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(lowercase_) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(lowercase_) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : int = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned SCREAMING_SNAKE_CASE_ : List[str] = '''''' for ch in content: ans += chr(ord(lowercase_) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned SCREAMING_SNAKE_CASE_ : List[Any] = '''''' for ch in content: ans += chr(ord(lowercase_) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) try: with open(lowercase_) as fin, open('''encrypt.out''' , '''w+''') as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(lowercase_ , lowercase_)) except OSError: return False return True def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) try: with open(lowercase_) as fin, open('''decrypt.out''' , '''w+''') as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(lowercase_ , lowercase_)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
91
0
'''simple docstring''' import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __A ( unittest.TestCase ): def _lowercase (self : Any ): UpperCAmelCase_ = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def _lowercase (self : int ): UpperCAmelCase_ = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__a ) ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def _lowercase (self : str ): UpperCAmelCase_ = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(__a ) ) def _lowercase (self : Dict ): UpperCAmelCase_ = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] UpperCAmelCase_ = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] UpperCAmelCase_ = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def _lowercase (self : Optional[int] ): # pass variant but use the non-variant filenames UpperCAmelCase_ = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] UpperCAmelCase_ = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] UpperCAmelCase_ = "fp16" self.assertFalse(is_safetensors_compatible(__a , variant=__a ) ) def _lowercase (self : str ): UpperCAmelCase_ = [ "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] UpperCAmelCase_ = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def _lowercase (self : Union[str, Any] ): # pass variant but use the non-variant 
filenames UpperCAmelCase_ = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] UpperCAmelCase_ = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def _lowercase (self : int ): UpperCAmelCase_ = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", # 'text_encoder/model.fp16.safetensors', "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] UpperCAmelCase_ = "fp16" self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
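A direct sketch of the helper these tests exercise; the filename lists mirror the ones used above.

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible

# Every .bin weight has a .safetensors counterpart -> compatible
print(is_safetensors_compatible([
    "unet/diffusion_pytorch_model.bin",
    "unet/diffusion_pytorch_model.safetensors",
]))  # True

# The fp16 variant is checked the same way via the `variant` argument
print(is_safetensors_compatible([
    "unet/diffusion_pytorch_model.fp16.bin",
    "unet/diffusion_pytorch_model.fp16.safetensors",
], variant="fp16"))  # True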
1
"""simple docstring""" def _A (__a = 50 ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f'''{solution() = }''')
91
0
'''simple docstring'''


def bfs(graph, source, sink, parent):
    """Breadth-first search; returns True if the sink is reachable and fills `parent` with the path found."""
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Edmonds-Karp variant of Ford-Fulkerson: repeatedly augment along shortest residual paths."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        # Find the minimum residual capacity along the augmenting path
        path_flow = float('''Inf''')
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update the residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
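An independent cross-check of the printed maximum flow, assuming SciPy >= 1.4 is available; for this classic network the value is 23. Note that the driver above mutates `graph`, so the check rebuilds the capacity matrix.

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import maximum_flow

capacities = np.array(
    [
        [0, 16, 13, 0, 0, 0],
        [0, 0, 10, 12, 0, 0],
        [0, 4, 0, 0, 14, 0],
        [0, 0, 9, 0, 0, 20],
        [0, 0, 0, 7, 0, 4],
        [0, 0, 0, 0, 0, 0],
    ]
)
print(maximum_flow(csr_matrix(capacities), 0, 5).flow_value)  # 23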
2
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = (PNDMScheduler,) __UpperCamelCase = (("num_inference_steps", 5_0),) def _SCREAMING_SNAKE_CASE ( self : Any , **lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = { '''num_train_timesteps''': 1000, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**lowercase_) return config def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[str]=0 , **lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('''num_inference_steps''' , lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_sample SCREAMING_SNAKE_CASE_ : List[Any] = 0.1 * sample SCREAMING_SNAKE_CASE_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config(**lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # copy over dummy past residuals SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class.from_pretrained(lowercase_) new_scheduler.set_timesteps(lowercase_) # copy over dummy past residuals SCREAMING_SNAKE_CASE_ : Optional[Any] = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str]=0 , **lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop('''num_inference_steps''' , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = 0.1 * sample SCREAMING_SNAKE_CASE_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # copy over dummy past residuals (must be after setting timesteps) SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_) SCREAMING_SNAKE_CASE_ : str = scheduler_class.from_pretrained(lowercase_) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_) # copy over dummy past residual (must be after setting 
timesteps) SCREAMING_SNAKE_CASE_ : Any = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Tuple = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : str , **lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_scheduler_config(**lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class(**lowercase_) SCREAMING_SNAKE_CASE_ : Dict = 10 SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_model() SCREAMING_SNAKE_CASE_ : str = self.dummy_sample_deter scheduler.set_timesteps(lowercase_) for i, t in enumerate(scheduler.prk_timesteps): SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : str = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample for i, t in enumerate(scheduler.plms_timesteps): SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_).prev_sample return sample def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''num_inference_steps''' , lowercase_) for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler_class(**lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_sample SCREAMING_SNAKE_CASE_ : Any = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase_ , '''set_timesteps'''): scheduler.set_timesteps(lowercase_) elif num_inference_steps is not None and not hasattr(lowercase_ , '''set_timesteps'''): SCREAMING_SNAKE_CASE_ : Optional[Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) SCREAMING_SNAKE_CASE_ : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] SCREAMING_SNAKE_CASE_ : Optional[int] = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Dict = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : List[Any] = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Any = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' for steps_offset in [0, 1]: 
self.check_over_configs(steps_offset=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : List[str] = self.get_scheduler_config(steps_offset=1) SCREAMING_SNAKE_CASE_ : Tuple = scheduler_class(**lowercase_) scheduler.set_timesteps(10) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , ) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02]): self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' for t in [1, 5, 10]: self.check_over_forward(time_step=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]): self.check_over_forward(num_inference_steps=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = 27 for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_sample SCREAMING_SNAKE_CASE_ : str = 0.1 * sample SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2]): SCREAMING_SNAKE_CASE_ : int = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' with self.assertRaises(lowercase_): SCREAMING_SNAKE_CASE_ : int = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : List[str] = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Dict = scheduler_class(**lowercase_) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.full_loop() SCREAMING_SNAKE_CASE_ : List[Any] = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 1_98.13_18) < 1e-2 assert abs(result_mean.item() - 0.25_80) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.full_loop(prediction_type='''v_prediction''') SCREAMING_SNAKE_CASE_ : str = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 67.39_86) < 1e-2 assert abs(result_mean.item() - 0.08_78) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01) SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 2_30.03_99) < 1e-2 assert abs(result_mean.item() - 0.29_95) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' 
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01) SCREAMING_SNAKE_CASE_ : int = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : List[str] = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 1_86.94_82) < 1e-2 assert abs(result_mean.item() - 0.24_34) < 1e-3
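The test class above exercises PNDMScheduler in two phases: a Runge-Kutta warm-up driven by step_prk over prk_timesteps, followed by linear-multistep updates via step_plms over plms_timesteps. The snippet below is a minimal standalone sketch of that loop; the random tensors stand in for the denoising model and the noisy sample, and the shapes and constructor arguments simply mirror the test configuration rather than any real pipeline.

# Minimal illustration of the PRK -> PLMS stepping pattern exercised above.
# The random tensors are placeholders for a denoising model and a noisy sample.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(
    num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.prk_timesteps:
    residual = torch.randn_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step_prk(residual, t, sample).prev_sample
for t in scheduler.plms_timesteps:
    residual = torch.randn_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step_plms(residual, t, sample).prev_sample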
91
0
'''simple docstring'''


def lowerCAmelCase_ ( discount_rate , cash_flows ):
    '''Return the net present value of the cash flows discounted at discount_rate, rounded to two decimals.'''
    if discount_rate < 0:
        raise ValueError('''Discount rate cannot be negative''' )
    if not cash_flows:
        raise ValueError('''Cash flows list cannot be empty''' )
    # discount each cash flow by (1 + rate) ** period and sum the results
    A : float = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(A , ndigits=2 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
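A small worked example for the present-value helper above; the cash-flow figures and the discount rate are invented purely for illustration.

# Illustrative numbers only: an initial outlay of 1000 followed by three inflows,
# discounted at 10% per period.
example_cash_flows = [-1000.0, 300.0, 400.0, 500.0]
print(lowerCAmelCase_(0.1, example_cash_flows))  # -21.04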
3
"""simple docstring""" import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @parameterized.expand([(None,), ('''foo.json''',)]) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ , config_name=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , lowercase_) self.assertEqual(loaded_config.temperature , 0.7) self.assertEqual(loaded_config.length_penalty , 1.0) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50) self.assertEqual(loaded_config.max_length , 20) self.assertEqual(loaded_config.max_time , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoConfig.from_pretrained('''gpt2''') SCREAMING_SNAKE_CASE_ : int = GenerationConfig.from_model_config(lowercase_) SCREAMING_SNAKE_CASE_ : int = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(lowercase_ , lowercase_) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = GenerationConfig() SCREAMING_SNAKE_CASE_ : Any = { '''max_new_tokens''': 1024, '''foo''': '''bar''', } SCREAMING_SNAKE_CASE_ : str = copy.deepcopy(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = generation_config.update(**lowercase_) # update_kwargs was not modified (no side effects) self.assertEqual(lowercase_ , lowercase_) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024) # `.update()` returns a dictionary of unused kwargs self.assertEqual(lowercase_ , {'''foo''': '''bar'''}) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = GenerationConfig() SCREAMING_SNAKE_CASE_ : List[str] = '''bar''' with tempfile.TemporaryDirectory('''test-generation-config''') as tmp_dir: generation_config.save_pretrained(lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = GenerationConfig.from_pretrained(lowercase_) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , '''bar''') SCREAMING_SNAKE_CASE_ : Optional[Any] = GenerationConfig.from_model_config(lowercase_) assert not hasattr(lowercase_ , '''foo''') # no new kwargs should be initialized if from config def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig() self.assertEqual(default_config.temperature , 1.0) 
self.assertEqual(default_config.do_sample , lowercase_) self.assertEqual(default_config.num_beams , 1) SCREAMING_SNAKE_CASE_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7) self.assertEqual(config.do_sample , lowercase_) self.assertEqual(config.num_beams , 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0) self.assertEqual(loaded_config.temperature , 1.0) self.assertEqual(loaded_config.do_sample , lowercase_) self.assertEqual(loaded_config.num_beams , 1) # default value @is_staging_test class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = TOKEN HfFolder.save_token(lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str]): '''simple docstring''' try: delete_repo(token=cls._token , repo_id='''test-generation-config''') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''') except HTTPError: pass def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''test-generation-config''' , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : int = GenerationConfig.from_pretrained(F'{USER}/test-generation-config') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) # Reset repo delete_repo(token=self._token , repo_id='''test-generation-config''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id='''test-generation-config''' , push_to_hub=lowercase_ , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : Optional[int] = GenerationConfig.from_pretrained(F'{USER}/test-generation-config') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : Any = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=lowercase_ , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : Any = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
91
0
'''simple docstring''' import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class UpperCAmelCase_ ( __lowercase ): lowerCamelCase : Tuple = (PNDMScheduler,) lowerCamelCase : Optional[int] = (('''num_inference_steps''', 50),) def __UpperCAmelCase ( self : int , **UpperCAmelCase__ : List[str] ) -> Dict: lowerCAmelCase = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', } config.update(**UpperCAmelCase__ ) return config def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Any=0 , **UpperCAmelCase__ : List[Any] ) -> Tuple: lowerCAmelCase = dict(self.forward_default_kwargs ) lowerCAmelCase = kwargs.pop('num_inference_steps' , UpperCAmelCase__ ) lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.get_scheduler_config(**UpperCAmelCase__ ) lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residuals lowerCAmelCase = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCAmelCase__ ) lowerCAmelCase = scheduler_class.from_pretrained(UpperCAmelCase__ ) new_scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residuals lowerCAmelCase = dummy_past_residuals[:] lowerCAmelCase = scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample lowerCAmelCase = new_scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowerCAmelCase = scheduler.step_plms(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample lowerCAmelCase = new_scheduler.step_plms(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __UpperCAmelCase ( self : List[str] ) -> Tuple: pass def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : Optional[int]=0 , **UpperCAmelCase__ : Tuple ) -> List[str]: lowerCAmelCase = dict(self.forward_default_kwargs ) lowerCAmelCase = kwargs.pop('num_inference_steps' , UpperCAmelCase__ ) lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residuals (must be after setting timesteps) lowerCAmelCase = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCAmelCase__ ) lowerCAmelCase = scheduler_class.from_pretrained(UpperCAmelCase__ ) # copy over dummy past residuals new_scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residual (must be after setting timesteps) lowerCAmelCase = dummy_past_residuals[:] lowerCAmelCase = scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample lowerCAmelCase = new_scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert 
torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowerCAmelCase = scheduler.step_plms(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample lowerCAmelCase = new_scheduler.step_plms(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __UpperCAmelCase ( self : Dict , **UpperCAmelCase__ : Tuple ) -> str: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(**UpperCAmelCase__ ) lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) lowerCAmelCase = 1_0 lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(UpperCAmelCase__ ) for i, t in enumerate(scheduler.prk_timesteps ): lowerCAmelCase = model(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): lowerCAmelCase = model(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = scheduler.step_plms(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample return sample def __UpperCAmelCase ( self : Tuple ) -> List[Any]: lowerCAmelCase = dict(self.forward_default_kwargs ) lowerCAmelCase = kwargs.pop('num_inference_steps' , UpperCAmelCase__ ) for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(UpperCAmelCase__ , 'set_timesteps' ): scheduler.set_timesteps(UpperCAmelCase__ ) elif num_inference_steps is not None and not hasattr(UpperCAmelCase__ , 'set_timesteps' ): lowerCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] lowerCAmelCase = dummy_past_residuals[:] lowerCAmelCase = scheduler.step_prk(UpperCAmelCase__ , 0 , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample lowerCAmelCase = scheduler.step_prk(UpperCAmelCase__ , 1 , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) lowerCAmelCase = scheduler.step_plms(UpperCAmelCase__ , 0 , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample lowerCAmelCase = scheduler.step_plms(UpperCAmelCase__ , 1 , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __UpperCAmelCase ( self : int ) -> Tuple: for timesteps in [1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=UpperCAmelCase__ ) def __UpperCAmelCase ( self : Dict ) -> Optional[int]: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=UpperCAmelCase__ ) lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(steps_offset=1 ) lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(1_0 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , ) def __UpperCAmelCase ( self : Optional[int] ) -> Any: for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02] ): 
self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__ ) def __UpperCAmelCase ( self : Dict ) -> Tuple: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCAmelCase__ ) def __UpperCAmelCase ( self : List[Any] ) -> int: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase__ ) def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]: for t in [1, 5, 1_0]: self.check_over_forward(time_step=UpperCAmelCase__ ) def __UpperCAmelCase ( self : str ) -> str: for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ): self.check_over_forward(num_inference_steps=UpperCAmelCase__ ) def __UpperCAmelCase ( self : Tuple ) -> str: # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 lowerCAmelCase = 2_7 for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(UpperCAmelCase__ ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): lowerCAmelCase = scheduler.step_prk(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample def __UpperCAmelCase ( self : Union[str, Any] ) -> int: with self.assertRaises(UpperCAmelCase__ ): lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]: lowerCAmelCase = self.full_loop() lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) ) lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) ) assert abs(result_sum.item() - 198.1_318 ) < 1E-2 assert abs(result_mean.item() - 0.2_580 ) < 1E-3 def __UpperCAmelCase ( self : List[Any] ) -> List[Any]: lowerCAmelCase = self.full_loop(prediction_type='v_prediction' ) lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) ) lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) ) assert abs(result_sum.item() - 67.3_986 ) < 1E-2 assert abs(result_mean.item() - 0.0_878 ) < 1E-3 def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: # We specify different beta, so that the first alpha is 0.99 lowerCAmelCase = self.full_loop(set_alpha_to_one=UpperCAmelCase__ , beta_start=0.01 ) lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) ) lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) ) assert abs(result_sum.item() - 230.0_399 ) < 1E-2 assert abs(result_mean.item() - 0.2_995 ) < 1E-3 def __UpperCAmelCase ( self : Any ) -> Optional[Any]: # We specify different beta, so that the first alpha is 0.99 lowerCAmelCase = self.full_loop(set_alpha_to_one=UpperCAmelCase__ , beta_start=0.01 ) lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) ) lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) ) assert abs(result_sum.item() - 186.9_482 ) < 1E-2 assert abs(result_mean.item() - 0.2_434 ) < 1E-3
4
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets UpperCAmelCase_ : Optional[Any] = datasets.logging.get_logger(__name__) UpperCAmelCase_ : List[str] = """\ @InProceedings{moosavi2019minimum, author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube}, title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection}, year = {2019}, booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, publisher = {Association for Computational Linguistics}, address = {Florence, Italy}, } @inproceedings{10.3115/1072399.1072405, author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette}, title = {A Model-Theoretic Coreference Scoring Scheme}, year = {1995}, isbn = {1558604022}, publisher = {Association for Computational Linguistics}, address = {USA}, url = {https://doi.org/10.3115/1072399.1072405}, doi = {10.3115/1072399.1072405}, booktitle = {Proceedings of the 6th Conference on Message Understanding}, pages = {45–52}, numpages = {8}, location = {Columbia, Maryland}, series = {MUC6 ’95} } @INPROCEEDINGS{Bagga98algorithmsfor, author = {Amit Bagga and Breck Baldwin}, title = {Algorithms for Scoring Coreference Chains}, booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference}, year = {1998}, pages = {563--566} } @INPROCEEDINGS{Luo05oncoreference, author = {Xiaoqiang Luo}, title = {On coreference resolution performance metrics}, booktitle = {In Proc. of HLT/EMNLP}, year = {2005}, pages = {25--32}, publisher = {URL} } @inproceedings{moosavi-strube-2016-coreference, title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\", author = \"Moosavi, Nafise Sadat and Strube, Michael\", booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\", month = aug, year = \"2016\", address = \"Berlin, Germany\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/P16-1060\", doi = \"10.18653/v1/P16-1060\", pages = \"632--642\", } """ UpperCAmelCase_ : Tuple = """\ CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which implements of the common evaluation metrics including MUC [Vilain et al, 1995], B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005], LEA [Moosavi and Strube, 2016] and the averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) [Denis and Baldridge, 2009a; Pradhan et al., 2011]. This wrapper of CoVal currently only work with CoNLL line format: The CoNLL format has one word per line with all the annotation for this word in column separated by spaces: Column Type Description 1 Document ID This is a variation on the document filename 2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc. 3 Word number 4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release. 
5 Part-of-Speech 6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column. 7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\" 8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7. 9 Word sense This is the word sense of the word in Column 3. 10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data. 11 Named Entities These columns identifies the spans representing various named entities. 12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7. N Coreference Coreference chain information encoded in a parenthesis structure. More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md CoVal code was written by @ns-moosavi. Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py The test suite is taken from https://github.com/conll/reference-coreference-scorers/ Mention evaluation and the test suite are added by @andreasvc. Parsing CoNLL files is developed by Leo Born. """ UpperCAmelCase_ : Union[str, Any] = """ Calculates coreference evaluation metrics. Args: predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format. Each prediction is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format. Each reference is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. keep_singletons: After extracting all mentions of key or system files, mentions whose corresponding coreference chain is of size one, are considered as singletons. The default evaluation mode will include singletons in evaluations if they are included in the key or the system files. By setting 'keep_singletons=False', all singletons in the key and system files will be excluded from the evaluation. NP_only: Most of the recent coreference resolvers only resolve NP mentions and leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs. min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans. Minimum spans are determined using the MINA algorithm. 
Returns: 'mentions': mentions 'muc': MUC metric [Vilain et al, 1995] 'bcub': B-cubed [Bagga and Baldwin, 1998] 'ceafe': CEAFe [Luo et al., 2005] 'lea': LEA [Moosavi and Strube, 2016] 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) Examples: >>> coval = datasets.load_metric('coval') >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -', ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)', ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)', ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -', ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -', ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -'] >>> references = [words] >>> predictions = [words] >>> results = coval.compute(predictions=predictions, references=references) >>> print(results) # doctest:+ELLIPSIS {'mentions/recall': 1.0,[...] 'conll_score': 100.0} """ def _A (__a , __a , __a=False , __a=False , __a=True , __a=False , __a="dummy_doc" ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = {doc: key_lines} SCREAMING_SNAKE_CASE_ : List[str] = {doc: sys_lines} SCREAMING_SNAKE_CASE_ : Dict = {} SCREAMING_SNAKE_CASE_ : Dict = 0 SCREAMING_SNAKE_CASE_ : List[str] = 0 SCREAMING_SNAKE_CASE_ : Tuple = 0 SCREAMING_SNAKE_CASE_ : int = 0 SCREAMING_SNAKE_CASE_ : List[str] = 0 SCREAMING_SNAKE_CASE_ : Any = 0 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = reader.get_doc_mentions(__a , key_doc_lines[doc] , __a ) key_singletons_num += singletons_num if NP_only or min_span: SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.set_annotated_parse_trees(__a , key_doc_lines[doc] , __a , __a ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = reader.get_doc_mentions(__a , sys_doc_lines[doc] , __a ) sys_singletons_num += singletons_num if NP_only or min_span: SCREAMING_SNAKE_CASE_ : Union[str, Any] = reader.set_annotated_parse_trees(__a , key_doc_lines[doc] , __a , __a ) if remove_nested: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = reader.remove_nested_coref_mentions(__a , __a ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = reader.remove_nested_coref_mentions(__a , __a ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.get_mention_assignments(__a , __a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.get_mention_assignments(__a , __a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( '''Number of removed nested coreferring mentions in the key ''' f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' ) logger.info( '''Number of resulting singleton clusters in the key ''' f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' ) if not keep_singletons: logger.info( f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ' '''files, respectively''' ) return doc_coref_infos def _A (__a , __a , __a , __a , __a , __a , __a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = get_coref_infos(__a , __a , __a , __a , __a , __a ) SCREAMING_SNAKE_CASE_ : str = {} SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0 
SCREAMING_SNAKE_CASE_ : str = 0 for name, metric in metrics: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = evaluator.evaluate_documents(__a , __a , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': fa} ) logger.info( name.ljust(10 ) , f'Recall: {recall * 1_00:.2f}' , f' Precision: {precision * 1_00:.2f}' , f' F1: {fa * 1_00:.2f}' , ) if conll_subparts_num == 3: SCREAMING_SNAKE_CASE_ : Tuple = (conll / 3) * 1_00 logger.info(f'CoNLL score: {conll:.2f}' ) output_scores.update({'''conll_score''': conll} ) return output_scores def _A (__a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = False for line in key_lines: if not line.startswith('''#''' ): if len(line.split() ) > 6: SCREAMING_SNAKE_CASE_ : Any = line.split()[5] if not parse_col == "-": SCREAMING_SNAKE_CASE_ : Any = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''')), '''references''': datasets.Sequence(datasets.Value('''string''')), }) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[ '''https://github.com/ns-moosavi/coval''', '''https://www.aclweb.org/anthology/P16-1060''', '''http://www.conll.cemantix.org/2012/data.html''', ] , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Dict=True , lowercase_ : Optional[Any]=False , lowercase_ : Optional[Any]=False , lowercase_ : Dict=False): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = [ ('''mentions''', evaluator.mentions), ('''muc''', evaluator.muc), ('''bcub''', evaluator.b_cubed), ('''ceafe''', evaluator.ceafe), ('''lea''', evaluator.lea), ] if min_span: SCREAMING_SNAKE_CASE_ : Union[str, Any] = util.check_gold_parse_annotation(lowercase_) if not has_gold_parse: raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''') # util.parse_key_file(key_file) # key_file = key_file + ".parsed" SCREAMING_SNAKE_CASE_ : Optional[Any] = evaluate( key_lines=lowercase_ , sys_lines=lowercase_ , metrics=lowercase_ , NP_only=lowercase_ , remove_nested=lowercase_ , keep_singletons=lowercase_ , min_span=lowercase_ , ) return score
91
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = '''▁''' UpperCAmelCase__ = {'''vocab_file''': '''spiece.model'''} UpperCAmelCase__ = { '''vocab_file''': { '''google/reformer-crime-and-punishment''': ( '''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model''' ) } } UpperCAmelCase__ = { '''google/reformer-crime-and-punishment''': 52_4288, } class lowerCamelCase__ ( lowerCAmelCase): SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] def __init__(self , UpperCAmelCase , UpperCAmelCase="</s>" , UpperCAmelCase="<unk>" , UpperCAmelCase=[] , UpperCAmelCase = None , **UpperCAmelCase , ) -> None: _lowercase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , ) _lowercase =vocab_file _lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase ) @property def __A (self ) -> Optional[int]: return self.sp_model.get_piece_size() def __A (self ) -> Dict[str, int]: _lowercase ={self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__(self ) -> Union[str, Any]: _lowercase =self.__dict__.copy() _lowercase =None return state def __setstate__(self , UpperCAmelCase ) -> Optional[Any]: _lowercase =d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _lowercase ={} _lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __A (self , UpperCAmelCase ) -> List[str]: return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase ) def __A (self , UpperCAmelCase ) -> str: return self.sp_model.piece_to_id(UpperCAmelCase ) def __A (self , UpperCAmelCase ) -> Tuple: if index < self.sp_model.get_piece_size(): _lowercase =self.sp_model.IdToPiece(UpperCAmelCase ) return token def __A (self , UpperCAmelCase ) -> str: _lowercase =[] _lowercase ='''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(UpperCAmelCase ) + token _lowercase =[] else: current_sub_tokens.append(UpperCAmelCase ) out_string += self.sp_model.decode(UpperCAmelCase ) return out_string.strip() def __A (self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(UpperCAmelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _lowercase =os.path.join( UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase , '''wb''' ) as fi: _lowercase =self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase ) return (out_vocab_file,)
5
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : Tuple = """▁""" UpperCAmelCase_ : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""} UpperCAmelCase_ : str = { """vocab_file""": { """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""", } } UpperCAmelCase_ : str = { """facebook/xglm-564M""": 2048, } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ["input_ids", "attention_mask"] def __init__( self : List[Any] , lowercase_ : str , lowercase_ : Tuple="<s>" , lowercase_ : Any="</s>" , lowercase_ : Optional[int]="</s>" , lowercase_ : List[Any]="<s>" , lowercase_ : Union[str, Any]="<unk>" , lowercase_ : Union[str, Any]="<pad>" , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : Tuple , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer SCREAMING_SNAKE_CASE_ : List[str] = 7 SCREAMING_SNAKE_CASE_ : Tuple = [F'<madeupword{i}>' for i in range(self.num_madeup_words)] SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.get('''additional_special_tokens''' , []) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(lowercase_)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1 # Mimic fairseq token-to-id alignment for the first 4 token SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} SCREAMING_SNAKE_CASE_ : List[Any] = len(self.sp_model) SCREAMING_SNAKE_CASE_ : Optional[Any] = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)} self.fairseq_tokens_to_ids.update(lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.__dict__.copy() SCREAMING_SNAKE_CASE_ : str = None SCREAMING_SNAKE_CASE_ : Optional[int] = self.sp_model.serialized_model_proto() return state def __setstate__( self : Tuple , lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs'''): SCREAMING_SNAKE_CASE_ : Union[str, Any] = {} SCREAMING_SNAKE_CASE_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None): '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_) if token_ids_a is None: return [1] + ([0] * len(lowercase_)) return [1] + ([0] * len(lowercase_)) + [1, 1] + ([0] * len(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a) * [0] @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : str): '''simple docstring''' return self.sp_model.encode(lowercase_ , out_type=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Union[str, Any]): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE_ : Optional[Any] = self.sp_model.PieceToId(lowercase_) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[Any]): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def 
_SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = ''''''.join(lowercase_).replace(lowercase_ , ''' ''').strip() return out_string def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None): '''simple docstring''' if not os.path.isdir(lowercase_): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join( lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , lowercase_) elif not os.path.isfile(self.vocab_file): with open(lowercase_ , '''wb''') as fi: SCREAMING_SNAKE_CASE_ : int = self.sp_model.serialized_model_proto() fi.write(lowercase_) return (out_vocab_file,)
91
0
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    '''Return both roots of a*x**2 + b*x + c = 0, as plain reals when the imaginary part is zero.'''
    if a == 0:
        raise ValueError('''Coefficient \'a\' must not be zero.''' )
    delta = b * b - 4 * a * c
    root_a = (-b + sqrt(delta )) / (2 * a)
    root_b = (-b - sqrt(delta )) / (2 * a)
    return (
        root_a.real if not root_a.imag else root_a,
        root_b.real if not root_b.imag else root_b,
    )


def main() -> None:
    solution_a , solution_b = quadratic_roots(a=5 , b=6 , c=1 )
    print(F"""The solutions are: {solution_a} and {solution_b}""" )


if __name__ == "__main__":
    main()
6
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Dict = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = [ 
'''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] SCREAMING_SNAKE_CASE_ : List[Any] = '''fp16''' self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] SCREAMING_SNAKE_CASE_ : List[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : str = '''fp16''' self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_))
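Taken together, the cases above pin down the behaviour of is_safetensors_compatible: every PyTorch .bin weight in the list needs a .safetensors counterpart, with pytorch_model.bin pairing with model.safetensors and diffusion_pytorch_model.bin keeping its stem. The sketch below restates that rule in simplified form to make the expectation concrete; it ignores the variant infix handling and is not the actual diffusers implementation.

# Simplified restatement of the rule the tests above assert; illustration only,
# not the real diffusers.pipelines.pipeline_utils code, and without the
# variant-infix handling that the fp16 cases exercise.
def naive_safetensors_compatible(filenames):
    files = set(filenames)
    for name in files:
        if not name.endswith(".bin"):
            continue
        stem = name[: -len(".bin")]
        # pytorch_model.bin pairs with model.safetensors;
        # diffusion_pytorch_model.bin keeps its own stem.
        candidates = {stem + ".safetensors", stem.replace("pytorch_model", "model") + ".safetensors"}
        if not candidates & files:
            return False
    return True


print(naive_safetensors_compatible(["unet/diffusion_pytorch_model.bin"]))  # False
print(naive_safetensors_compatible(
    ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
))  # True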
91
0
from typing import Dict

from .base import GenericTensor, Pipeline


class A ( Pipeline ):
    """simple docstring"""

    def _sanitize_parameters( self , truncation=None , tokenize_kwargs=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess( self , inputs , **tokenize_kwargs ) -> Dict[str, GenericTensor]:
        '''simple docstring'''
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors , **tokenize_kwargs )
        return model_inputs

    def _forward( self , model_inputs ):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess( self , model_outputs , return_tensors=False ):
        '''simple docstring'''
        # return_tensors keeps the raw framework tensor; otherwise convert to nested lists
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        return super().__call__(*args , **kwargs )
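In practice this pipeline is usually reached through the high-level pipeline factory rather than instantiated directly; a short usage sketch follows. The checkpoint name is only an example and loading it requires network access.

# Usage sketch for the feature-extraction pipeline; the checkpoint name is an
# example and downloading it needs network access.
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("This is a test sentence.")
print(len(features[0]))     # number of tokens in the encoded sentence
print(len(features[0][0]))  # hidden size of each token vector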
7
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable UpperCAmelCase_ : str = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Dict = ["""GPTNeoXTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : List[str] = [ """GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoXForCausalLM""", """GPTNeoXForQuestionAnswering""", """GPTNeoXForSequenceClassification""", """GPTNeoXForTokenClassification""", """GPTNeoXLayer""", """GPTNeoXModel""", """GPTNeoXPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
91
0
from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''', } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = "efficientnet" def __init__( self : Tuple , _UpperCamelCase : int = 3 , _UpperCamelCase : int = 6_0_0 , _UpperCamelCase : float = 2.0 , _UpperCamelCase : float = 3.1 , _UpperCamelCase : int = 8 , _UpperCamelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , _UpperCamelCase : List[int] = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , _UpperCamelCase : List[int] = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , _UpperCamelCase : List[int] = [] , _UpperCamelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , _UpperCamelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , _UpperCamelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , _UpperCamelCase : float = 0.25 , _UpperCamelCase : str = "swish" , _UpperCamelCase : int = 2_5_6_0 , _UpperCamelCase : str = "mean" , _UpperCamelCase : float = 0.02 , _UpperCamelCase : float = 0.001 , _UpperCamelCase : float = 0.99 , _UpperCamelCase : float = 0.5 , _UpperCamelCase : float = 0.2 , **_UpperCamelCase : Optional[Any] , ) ->str: super().__init__(**_UpperCamelCase ) snake_case_ = num_channels snake_case_ = image_size snake_case_ = width_coefficient snake_case_ = depth_coefficient snake_case_ = depth_divisor snake_case_ = kernel_sizes snake_case_ = in_channels snake_case_ = out_channels snake_case_ = depthwise_padding snake_case_ = strides snake_case_ = num_block_repeats snake_case_ = expand_ratios snake_case_ = squeeze_expansion_ratio snake_case_ = hidden_act snake_case_ = hidden_dim snake_case_ = pooling_type snake_case_ = initializer_range snake_case_ = batch_norm_eps snake_case_ = batch_norm_momentum snake_case_ = dropout_rate snake_case_ = drop_connect_rate snake_case_ = sum(_UpperCamelCase ) * 4 class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = version.parse("1.11" ) @property def snake_case__( self : List[str] ) ->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def snake_case__( self : int ) ->float: return 1e-5
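The configuration above only stores the compound-scaling knobs (`width_coefficient`, `depth_coefficient`, `depth_divisor`); it does not show how they are applied to the per-block channel and repeat lists. As a hedged illustration of the standard EfficientNet recipe, scaled channel counts are rounded to a multiple of the divisor (without dropping more than ten percent of the target) and block repeats are rounded up. The function names below are invented for this sketch and are not defined in the file above.

import math


def round_filters_sketch(channels: int, width_coefficient: float, depth_divisor: int = 8) -> int:
    # Scale the channel count, then snap it to the nearest multiple of depth_divisor,
    # bumping up one step if rounding removed more than 10% of the scaled value.
    scaled = channels * width_coefficient
    new_channels = max(depth_divisor, int(scaled + depth_divisor / 2) // depth_divisor * depth_divisor)
    if new_channels < 0.9 * scaled:
        new_channels += depth_divisor
    return int(new_channels)


def round_repeats_sketch(repeats: int, depth_coefficient: float) -> int:
    # Depth scaling rounds the number of block repeats up.
    return int(math.ceil(depth_coefficient * repeats))


# For width_coefficient=2.0 and depth_coefficient=3.1 (the b7-style defaults above):
# round_filters_sketch(32, 2.0) == 64 and round_repeats_sketch(3, 3.1) == 10.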
8
"""simple docstring""" import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py UpperCAmelCase_ : Optional[int] = """src/transformers""" UpperCAmelCase_ : Tuple = """docs/source/en""" UpperCAmelCase_ : Optional[Any] = """.""" def _A (__a , __a , __a ) -> Dict: """simple docstring""" with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Dict = f.readlines() # Find the start prompt. SCREAMING_SNAKE_CASE_ : List[Any] = 0 while not lines[start_index].startswith(__a ): start_index += 1 start_index += 1 SCREAMING_SNAKE_CASE_ : Tuple = start_index while not lines[end_index].startswith(__a ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | UpperCAmelCase_ : Optional[Any] = """Model|Encoder|Decoder|ForConditionalGeneration""" # Regexes that match TF/Flax/PT model names. UpperCAmelCase_ : int = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") UpperCAmelCase_ : Dict = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. UpperCAmelCase_ : int = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # This is to make sure the transformers module imported is the one in the repo. UpperCAmelCase_ : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH) def _A (__a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , __a ) return [m.group(0 ) for m in matches] def _A (__a , __a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = 2 if text == '''✅''' or text == '''❌''' else len(__a ) SCREAMING_SNAKE_CASE_ : Tuple = (width - text_length) // 2 SCREAMING_SNAKE_CASE_ : Tuple = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def _A () -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES SCREAMING_SNAKE_CASE_ : Tuple = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } SCREAMING_SNAKE_CASE_ : List[Any] = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. SCREAMING_SNAKE_CASE_ : List[str] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : int = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : List[str] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : int = collections.defaultdict(__a ) # Let's lookup through all transformers object (once). 
for attr_name in dir(__a ): SCREAMING_SNAKE_CASE_ : Any = None if attr_name.endswith('''Tokenizer''' ): SCREAMING_SNAKE_CASE_ : Dict = slow_tokenizers SCREAMING_SNAKE_CASE_ : Dict = attr_name[:-9] elif attr_name.endswith('''TokenizerFast''' ): SCREAMING_SNAKE_CASE_ : Optional[Any] = fast_tokenizers SCREAMING_SNAKE_CASE_ : Optional[Any] = attr_name[:-13] elif _re_tf_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : int = tf_models SCREAMING_SNAKE_CASE_ : Dict = _re_tf_models.match(__a ).groups()[0] elif _re_flax_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : Any = flax_models SCREAMING_SNAKE_CASE_ : Tuple = _re_flax_models.match(__a ).groups()[0] elif _re_pt_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : str = pt_models SCREAMING_SNAKE_CASE_ : int = _re_pt_models.match(__a ).groups()[0] if lookup_dict is not None: while len(__a ) > 0: if attr_name in model_name_to_prefix.values(): SCREAMING_SNAKE_CASE_ : List[str] = True break # Try again after removing the last word in the name SCREAMING_SNAKE_CASE_ : Optional[int] = ''''''.join(camel_case_split(__a )[:-1] ) # Let's build that table! SCREAMING_SNAKE_CASE_ : Any = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) SCREAMING_SNAKE_CASE_ : Any = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support'''] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). SCREAMING_SNAKE_CASE_ : List[str] = [len(__a ) + 2 for c in columns] SCREAMING_SNAKE_CASE_ : str = max([len(__a ) for name in model_names] ) + 2 # Build the table per se SCREAMING_SNAKE_CASE_ : List[Any] = '''|''' + '''|'''.join([_center_text(__a , __a ) for c, w in zip(__a , __a )] ) + '''|\n''' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n" SCREAMING_SNAKE_CASE_ : Union[str, Any] = {True: '''✅''', False: '''❌'''} for name in model_names: SCREAMING_SNAKE_CASE_ : str = model_name_to_prefix[name] SCREAMING_SNAKE_CASE_ : int = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(__a , __a ) for l, w in zip(__a , __a )] ) + "|\n" return table def _A (__a=False ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = _find_text_in_file( filename=os.path.join(__a , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , ) SCREAMING_SNAKE_CASE_ : Tuple = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(__a , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' ) if __name__ == "__main__": UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCAmelCase_ : Any = parser.parse_args() check_model_table(args.fix_and_overwrite)
91
0
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass


test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Inserting at the head in descending order yields an ascending list.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one, sll_two):
    # Merge two sorted linked lists by re-sorting the concatenation of their values.
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
9
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , lowercase_ : List[Any] , lowercase_ : List[str]=13 , lowercase_ : int=7 , lowercase_ : Any=True , lowercase_ : str=True , lowercase_ : List[Any]=True , lowercase_ : List[Any]=True , lowercase_ : Dict=99 , lowercase_ : Union[str, Any]=24 , lowercase_ : int=2 , lowercase_ : List[str]=6 , lowercase_ : Any=37 , lowercase_ : Dict="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Dict=0.1 , lowercase_ : Union[str, Any]=512 , lowercase_ : List[str]=16 , lowercase_ : Any=2 , lowercase_ : Any=0.02 , lowercase_ : List[Any]=3 , lowercase_ : Optional[int]=None , lowercase_ : str=1000 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = parent SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_size SCREAMING_SNAKE_CASE_ : Optional[Any] = seq_length SCREAMING_SNAKE_CASE_ : List[Any] = is_training SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask SCREAMING_SNAKE_CASE_ : Optional[Any] = use_token_type_ids SCREAMING_SNAKE_CASE_ : int = use_labels SCREAMING_SNAKE_CASE_ : List[Any] = vocab_size SCREAMING_SNAKE_CASE_ : List[str] = hidden_size SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE_ : List[str] = num_attention_heads SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size SCREAMING_SNAKE_CASE_ : Tuple = hidden_act SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE_ : Union[str, Any] = type_vocab_size SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size SCREAMING_SNAKE_CASE_ : Any = initializer_range SCREAMING_SNAKE_CASE_ : Optional[Any] = num_labels SCREAMING_SNAKE_CASE_ : Tuple = scope SCREAMING_SNAKE_CASE_ : Optional[int] = range_bbox def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 3] SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 1] SCREAMING_SNAKE_CASE_ : str = t if bbox[i, j, 2] < bbox[i, j, 0]: SCREAMING_SNAKE_CASE_ : List[str] = bbox[i, j, 2] SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 0] SCREAMING_SNAKE_CASE_ : List[str] = t SCREAMING_SNAKE_CASE_ : Tuple = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) SCREAMING_SNAKE_CASE_ : Union[str, Any] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) SCREAMING_SNAKE_CASE_ : List[str] = None 
SCREAMING_SNAKE_CASE_ : List[str] = None if self.use_labels: SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE_ : Any = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = LiltModel(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , bbox=lowercase_ , token_type_ids=lowercase_) SCREAMING_SNAKE_CASE_ : int = model(lowercase_ , bbox=lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE_ : Optional[Any] = LiltForTokenClassification(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Tuple = model( lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : str , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = LiltForQuestionAnswering(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Optional[int] = model( lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) : List[str] = config_and_inputs 
SCREAMING_SNAKE_CASE_ : str = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __UpperCamelCase = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : str): '''simple docstring''' return True def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = LiltModelTester(self) SCREAMING_SNAKE_CASE_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE_ : Dict = type self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase_) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Optional[int] = LiltModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) @require_torch @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''').to(lowercase_) SCREAMING_SNAKE_CASE_ : str = torch.tensor([[1, 2]] , device=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase_) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Dict = model(input_ids=lowercase_ , bbox=lowercase_) SCREAMING_SNAKE_CASE_ : str = torch.Size([1, 2, 768]) SCREAMING_SNAKE_CASE_ : Dict = torch.tensor( [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=lowercase_ , ) self.assertTrue(outputs.last_hidden_state.shape , lowercase_) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase_ , atol=1e-3))
91
0
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = { "nielsr/canine-s": 2048, } # Unicode defines 1,114,112 total “codepoints” __A = 111_4112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py __A = 0 __A = 0xE000 __A = 0xE001 __A = 0xE002 __A = 0xE003 __A = 0xE004 # Maps special codepoints to human-readable names. __A = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. __A = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__(self : List[Any] , UpperCAmelCase_ : Any=chr(UpperCAmelCase_) , UpperCAmelCase_ : List[Any]=chr(UpperCAmelCase_) , UpperCAmelCase_ : Any=chr(UpperCAmelCase_) , UpperCAmelCase_ : List[Any]=chr(UpperCAmelCase_) , UpperCAmelCase_ : List[str]=chr(UpperCAmelCase_) , UpperCAmelCase_ : Dict=chr(UpperCAmelCase_) , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Union[str, Any]=2_048 , **UpperCAmelCase_ : Tuple , ) ->Tuple: '''simple docstring''' lowerCamelCase__: List[Any] =AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token lowerCamelCase__: Tuple =AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token lowerCamelCase__: Tuple =AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token lowerCamelCase__: List[Any] =AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token lowerCamelCase__: Any =AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase__: Dict =AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token super().__init__( bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , model_max_length=UpperCAmelCase_ , **UpperCAmelCase_ , ) # Creates a mapping for looking up the IDs of special symbols. lowerCamelCase__: Dict[str, int] ={} for codepoint, name in SPECIAL_CODEPOINTS.items(): lowerCamelCase__: Any =codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
lowerCamelCase__: Dict[int, str] ={ codepoint: name for name, codepoint in self._special_codepoints.items() } lowerCamelCase__: Dict =UNICODE_VOCAB_SIZE lowerCamelCase__: Optional[Any] =len(self._special_codepoints) @property def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->int: '''simple docstring''' return self._unicode_vocab_size def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : str) ->List[str]: '''simple docstring''' return list(UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : str) ->int: '''simple docstring''' try: return ord(UpperCAmelCase_) except TypeError: raise ValueError(F"""invalid token: '{token}'""") def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : int) ->str: '''simple docstring''' try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(UpperCAmelCase_) except TypeError: raise ValueError(F"""invalid id: {index}""") def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : List[Any]) ->str: '''simple docstring''' return "".join(UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]: '''simple docstring''' lowerCamelCase__: Union[str, Any] =[self.sep_token_id] lowerCamelCase__: List[Any] =[self.cls_token_id] lowerCamelCase__: List[str] =cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False) ->List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_) lowerCamelCase__: Any =[1] + ([0] * len(UpperCAmelCase_)) + [1] if token_ids_a is not None: result += ([0] * len(UpperCAmelCase_)) + [1] return result def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]: '''simple docstring''' lowerCamelCase__: str =[self.sep_token_id] lowerCamelCase__: int =[self.cls_token_id] lowerCamelCase__: List[str] =len(cls + token_ids_a + sep) * [0] if token_ids_a is not None: result += len(token_ids_a + sep) * [1] return result def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->str: '''simple docstring''' return ()
10
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) UpperCAmelCase_ : Dict = logging.getLogger(__name__) if __name__ == "__main__": UpperCAmelCase_ : List[str] = argparse.ArgumentParser( description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)""" ) parser.add_argument( """--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset.""" ) parser.add_argument( """--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file.""" ) parser.add_argument("""--vocab_size""", default=30522, type=int) UpperCAmelCase_ : Optional[Any] = parser.parse_args() logger.info(f'''Loading data from {args.data_file}''') with open(args.data_file, """rb""") as fp: UpperCAmelCase_ : Union[str, Any] = pickle.load(fp) logger.info("""Counting occurrences for MLM.""") UpperCAmelCase_ : Any = Counter() for tk_ids in data: counter.update(tk_ids) UpperCAmelCase_ : List[Any] = [0] * args.vocab_size for k, v in counter.items(): UpperCAmelCase_ : Dict = v logger.info(f'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, """wb""") as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
91
0
from __future__ import annotations


def _UpperCAmelCase (nums: list):
    # Return the arithmetic mean of a non-empty list of numbers.
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
11
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) def _A (__a , __a ) -> Tuple: """simple docstring""" try: with open(__a , '''rb''' ) as flax_state_f: SCREAMING_SNAKE_CASE_ : Optional[int] = from_bytes(__a , flax_state_f.read() ) except UnpicklingError as e: try: with open(__a ) as f: if f.read().startswith('''version''' ): raise OSError( '''You seem to have cloned a repository without having git-lfs installed. Please''' ''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the''' ''' folder you cloned.''' ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(__a , __a ) def _A (__a , __a ) -> Tuple: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights SCREAMING_SNAKE_CASE_ : Optional[int] = flatten_dict(jax.tree_util.tree_map(lambda __a : x.dtype == jnp.bfloataa , __a ) ).values() if any(__a ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = jax.tree_util.tree_map( lambda __a : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __a ) SCREAMING_SNAKE_CASE_ : int = '''''' SCREAMING_SNAKE_CASE_ : str = flatten_dict(__a , sep='''.''' ) SCREAMING_SNAKE_CASE_ : List[Any] = pt_model.state_dict() # keep track of unexpected & missing keys SCREAMING_SNAKE_CASE_ : str = [] SCREAMING_SNAKE_CASE_ : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): SCREAMING_SNAKE_CASE_ : Any = flax_key_tuple.split('''.''' ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: SCREAMING_SNAKE_CASE_ : Any = flax_key_tuple_array[:-1] + ['''weight'''] SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.transpose(__a , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": SCREAMING_SNAKE_CASE_ : Tuple = flax_key_tuple_array[:-1] + ['''weight'''] SCREAMING_SNAKE_CASE_ : Optional[int] = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": SCREAMING_SNAKE_CASE_ : Optional[int] = flax_key_tuple_array[:-1] + ['''weight'''] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(__a ): SCREAMING_SNAKE_CASE_ : List[str] = ( flax_key_tuple_string.replace('''_0''' , '''.0''' ) .replace('''_1''' , '''.1''' ) .replace('''_2''' , '''.2''' ) .replace('''_3''' , '''.3''' ) .replace('''_4''' , '''.4''' ) .replace('''_5''' , '''.5''' ) .replace('''_6''' , '''.6''' ) .replace('''_7''' , '''.7''' ) .replace('''_8''' , '''.8''' ) .replace('''_9''' , '''.9''' ) ) SCREAMING_SNAKE_CASE_ : Optional[Any] = '''.'''.join(__a ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f'Flax checkpoint seems to be 
incorrect. Weight {flax_key_tuple} was expected ' f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict SCREAMING_SNAKE_CASE_ : Optional[int] = np.asarray(__a ) if not isinstance(__a , np.ndarray ) else flax_tensor SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.from_numpy(__a ) # remove from missing keys missing_keys.remove(__a ) else: # weight is not expected by PyTorch model unexpected_keys.append(__a ) pt_model.load_state_dict(__a ) # re-transform missing_keys to list SCREAMING_SNAKE_CASE_ : int = list(__a ) if len(__a ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) if len(__a ) > 0: logger.warning( f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) return pt_model
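As a small sanity check of the two permutations used above: Flax stores convolution kernels as (height, width, in_channels, out_channels) and dense kernels as (in_features, out_features), while PyTorch expects (out_channels, in_channels, height, width) and (out_features, in_features). The shapes below are arbitrary example values, not taken from any real checkpoint.

import numpy as np

# A 3x3 convolution with 16 input and 32 output channels in the Flax layout ...
flax_conv_kernel = np.zeros((3, 3, 16, 32))
# ... and a dense kernel mapping 128 features to 64 in the Flax layout.
flax_dense_kernel = np.zeros((128, 64))

# The (3, 2, 0, 1) permutation yields the PyTorch Conv2d layout (C_out, C_in, H, W),
# and a plain transpose yields the PyTorch Linear layout (out_features, in_features).
assert np.transpose(flax_conv_kernel, (3, 2, 0, 1)).shape == (32, 16, 3, 3)
assert flax_dense_kernel.T.shape == (64, 128)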
91
0
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: UpperCAmelCase_ = None UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase_ = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } UpperCAmelCase_ = { 'facebook/nllb-large-en-ro': 1_024, 'facebook/nllb-200-distilled-600M': 1_024, } # fmt: off UpperCAmelCase_ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES UpperCAmelCase__ : Tuple = 
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : str = ['input_ids', 'attention_mask'] UpperCAmelCase__ : str = NllbTokenizer UpperCAmelCase__ : List[int] = [] UpperCAmelCase__ : List[int] = [] def __init__( self: Dict , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Any="<s>" , UpperCamelCase_: Any="</s>" , UpperCamelCase_: List[Any]="</s>" , UpperCamelCase_: Optional[Any]="<s>" , UpperCamelCase_: int="<unk>" , UpperCamelCase_: Union[str, Any]="<pad>" , UpperCamelCase_: Union[str, Any]="<mask>" , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: str=None , UpperCamelCase_: Dict=None , UpperCamelCase_: Optional[Any]=False , **UpperCamelCase_: int , ): # Mask token behave like a normal word, i.e. include the space before it __lowerCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token __lowerCamelCase = legacy_behaviour super().__init__( vocab_file=UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , legacy_behaviour=UpperCamelCase_ , **UpperCamelCase_ , ) __lowerCamelCase = vocab_file __lowerCamelCase = False if not self.vocab_file else True __lowerCamelCase = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} ) __lowerCamelCase = { lang_code: self.convert_tokens_to_ids(UpperCamelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __lowerCamelCase = src_lang if src_lang is not None else """eng_Latn""" __lowerCamelCase = self.convert_tokens_to_ids(self._src_lang ) __lowerCamelCase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def lowerCAmelCase__ ( self: int ): return self._src_lang @src_lang.setter def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ): __lowerCamelCase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowerCAmelCase__ ( self: Any , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ): __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: Optional[str] , UpperCamelCase_: Optional[str] , **UpperCamelCase_: int ): if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) __lowerCamelCase = src_lang __lowerCamelCase = self(UpperCamelCase_ , 
add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ ) __lowerCamelCase = self.convert_tokens_to_ids(UpperCamelCase_ ) __lowerCamelCase = tgt_lang_id return inputs def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: str = "eng_Latn" , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "fra_Latn" , **UpperCamelCase_: Optional[int] , ): __lowerCamelCase = src_lang __lowerCamelCase = tgt_lang return super().prepare_seqaseq_batch(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Dict ): return self.set_src_lang_special_tokens(self.src_lang ) def lowerCAmelCase__ ( self: List[str] ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Optional[int] ): __lowerCamelCase = self.convert_tokens_to_ids(UpperCamelCase_ ) if self.legacy_behaviour: __lowerCamelCase = [] __lowerCamelCase = [self.eos_token_id, self.cur_lang_code] else: __lowerCamelCase = [self.cur_lang_code] __lowerCamelCase = [self.eos_token_id] __lowerCamelCase = self.convert_ids_to_tokens(self.prefix_tokens ) __lowerCamelCase = self.convert_ids_to_tokens(self.suffix_tokens ) __lowerCamelCase = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def lowerCAmelCase__ ( self: Any , UpperCamelCase_: str ): __lowerCamelCase = self.convert_tokens_to_ids(UpperCamelCase_ ) if self.legacy_behaviour: __lowerCamelCase = [] __lowerCamelCase = [self.eos_token_id, self.cur_lang_code] else: __lowerCamelCase = [self.cur_lang_code] __lowerCamelCase = [self.eos_token_id] __lowerCamelCase = self.convert_ids_to_tokens(self.prefix_tokens ) __lowerCamelCase = self.convert_ids_to_tokens(self.suffix_tokens ) __lowerCamelCase = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def lowerCAmelCase__ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(UpperCamelCase_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory.' ) return __lowerCamelCase = os.path.join( UpperCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.vocab_file , UpperCamelCase_ ) return (out_vocab_file,)
12
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase_ : Any = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "openai-gpt" __UpperCamelCase = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : List[str] , lowercase_ : List[str]=40478 , lowercase_ : List[str]=512 , lowercase_ : Optional[Any]=768 , lowercase_ : Tuple=12 , lowercase_ : Tuple=12 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : List[Any]=1e-5 , lowercase_ : int=0.02 , lowercase_ : Optional[int]="cls_index" , lowercase_ : Any=True , lowercase_ : List[Any]=None , lowercase_ : List[str]=True , lowercase_ : Optional[Any]=0.1 , **lowercase_ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = vocab_size SCREAMING_SNAKE_CASE_ : Tuple = n_positions SCREAMING_SNAKE_CASE_ : Optional[int] = n_embd SCREAMING_SNAKE_CASE_ : Dict = n_layer SCREAMING_SNAKE_CASE_ : Any = n_head SCREAMING_SNAKE_CASE_ : Union[str, Any] = afn SCREAMING_SNAKE_CASE_ : int = resid_pdrop SCREAMING_SNAKE_CASE_ : List[str] = embd_pdrop SCREAMING_SNAKE_CASE_ : Union[str, Any] = attn_pdrop SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_epsilon SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range SCREAMING_SNAKE_CASE_ : List[str] = summary_type SCREAMING_SNAKE_CASE_ : Tuple = summary_use_proj SCREAMING_SNAKE_CASE_ : Union[str, Any] = summary_activation SCREAMING_SNAKE_CASE_ : Any = summary_first_dropout SCREAMING_SNAKE_CASE_ : List[str] = summary_proj_to_labels super().__init__(**lowercase_)
91
0
class __lowercase : """simple docstring""" def __init__( self : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]): SCREAMING_SNAKE_CASE_: List[str] = name SCREAMING_SNAKE_CASE_: Union[str, Any] = val def __str__( self : Dict): return F"{self.__class__.__name__}({self.name}, {self.val})" def __lt__( self : List[str] , lowerCAmelCase__ : Any): return self.val < other.val class __lowercase : """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Dict): SCREAMING_SNAKE_CASE_: str = {} SCREAMING_SNAKE_CASE_: int = {} SCREAMING_SNAKE_CASE_: Any = self.build_heap(lowerCAmelCase__) def __getitem__( self : List[Any] , lowerCAmelCase__ : Dict): return self.get_value(lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Dict): return (idx - 1) // 2 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any]): return idx * 2 + 1 def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple): return idx * 2 + 2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Optional[int]): return self.heap_dict[key] def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : Union[str, Any]): SCREAMING_SNAKE_CASE_: Tuple = len(lowerCAmelCase__) - 1 SCREAMING_SNAKE_CASE_: List[str] = self.get_parent_idx(lowerCAmelCase__) for idx, i in enumerate(lowerCAmelCase__): SCREAMING_SNAKE_CASE_: Union[str, Any] = idx SCREAMING_SNAKE_CASE_: str = i.val for i in range(lowerCAmelCase__ , -1 , -1): self.sift_down(lowerCAmelCase__ , lowerCAmelCase__) return array def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str]): while True: SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_left_child_idx(lowerCAmelCase__) # noqa: E741 SCREAMING_SNAKE_CASE_: Dict = self.get_right_child_idx(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = idx if l < len(lowerCAmelCase__) and array[l] < array[idx]: SCREAMING_SNAKE_CASE_: List[str] = l if r < len(lowerCAmelCase__) and array[r] < array[smallest]: SCREAMING_SNAKE_CASE_: str = r if smallest != idx: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = array[smallest], array[idx] ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ): Optional[Any] = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) SCREAMING_SNAKE_CASE_: Optional[int] = smallest else: break def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : str): SCREAMING_SNAKE_CASE_: Any = self.get_parent_idx(lowerCAmelCase__) while p >= 0 and self.heap[p] > self.heap[idx]: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = self.heap[idx], self.heap[p] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) SCREAMING_SNAKE_CASE_: Union[str, Any] = p SCREAMING_SNAKE_CASE_: Optional[int] = self.get_parent_idx(lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : List[Any]): return self.heap[0] def _SCREAMING_SNAKE_CASE ( self : Dict): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self.heap[-1], self.heap[0] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) SCREAMING_SNAKE_CASE_: int = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap) return x def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Tuple): self.heap.append(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = len(self.heap) - 1 SCREAMING_SNAKE_CASE_: 
List[str] = node.val self.sift_up(len(self.heap) - 1) def _SCREAMING_SNAKE_CASE ( self : List[Any]): return len(self.heap) == 0 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int]): assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" SCREAMING_SNAKE_CASE_: Any = new_value SCREAMING_SNAKE_CASE_: Tuple = new_value self.sift_up(self.idx_of_element[node]) lowerCAmelCase : int = Node("""R""", -1) lowerCAmelCase : str = Node("""B""", 6) lowerCAmelCase : str = Node("""A""", 3) lowerCAmelCase : List[str] = Node("""X""", 1) lowerCAmelCase : Union[str, Any] = Node("""E""", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array lowerCAmelCase : Optional[Any] = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print("""Min Heap - before decrease key""") for i in my_min_heap.heap: print(i) print("""Min Heap - After decrease key of node [B -> -17]""") my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
13
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[str] , *lowercase_ : Dict , **lowercase_ : Union[str, Any]): '''simple docstring''' warnings.warn( '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DeiTImageProcessor instead.''' , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_)
91
0
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets _lowerCamelCase : Dict = """\ @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } """ _lowerCamelCase : Union[str, Any] = """\ MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences. For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021). This metrics is a wrapper around the official implementation of MAUVE: https://github.com/krishnap25/mauve """ _lowerCamelCase : Optional[Any] = """ Calculates MAUVE scores between two lists of generated text and reference text. Args: predictions: list of generated text to score. Each predictions should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. Optional Args: num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1 kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9 kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5 kmeans_max_iter: maximum number of k-means iterations. Default 500 featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']. device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU max_text_length: maximum number of tokens to consider. Default 1024 divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25 mauve_scaling_factor: \"c\" from the paper. Default 5. verbose: If True (default), print running time updates seed: random seed to initialize k-means cluster assignments. Returns: mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer, frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer, divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve, p_hist: a discrete distribution, which is a quantized version of the text distribution p_text, q_hist: same as above, but with q_text. 
Examples: >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest >>> import datasets >>> mauve = datasets.load_metric('mauve') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP >>> print(out.mauve) # doctest: +SKIP 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Value('''string''' , id='''sequence'''), }) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[ '''https://arxiv.org/abs/2102.01454''', '''https://github.com/krishnap25/mauve''', ] , ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Optional[Any]="auto" , UpperCAmelCase__ : Tuple=-1 , UpperCAmelCase__ : Any=0.9 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : Any=500 , UpperCAmelCase__ : str="gpt2-large" , UpperCAmelCase__ : Dict=-1 , UpperCAmelCase__ : int=1_024 , UpperCAmelCase__ : Any=25 , UpperCAmelCase__ : Optional[int]=5 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=25 , ) ->Any: '''simple docstring''' A__ = compute_mauve( p_text=UpperCAmelCase__ , q_text=UpperCAmelCase__ , p_features=UpperCAmelCase__ , q_features=UpperCAmelCase__ , p_tokens=UpperCAmelCase__ , q_tokens=UpperCAmelCase__ , num_buckets=UpperCAmelCase__ , pca_max_data=UpperCAmelCase__ , kmeans_explained_var=UpperCAmelCase__ , kmeans_num_redo=UpperCAmelCase__ , kmeans_max_iter=UpperCAmelCase__ , featurize_model_name=UpperCAmelCase__ , device_id=UpperCAmelCase__ , max_text_length=UpperCAmelCase__ , divergence_curve_discretization_size=UpperCAmelCase__ , mauve_scaling_factor=UpperCAmelCase__ , verbose=UpperCAmelCase__ , seed=UpperCAmelCase__ , ) return out
14
"""simple docstring""" import random from typing import Any def _A (__a ) -> list[Any]: """simple docstring""" for _ in range(len(__a ) ): SCREAMING_SNAKE_CASE_ : Optional[int] = random.randint(0 , len(__a ) - 1 ) SCREAMING_SNAKE_CASE_ : Tuple = random.randint(0 , len(__a ) - 1 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = data[b], data[a] return data if __name__ == "__main__": UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5, 6, 7] UpperCAmelCase_ : Dict = ["""python""", """says""", """hello""", """!"""] print("""Fisher-Yates Shuffle:""") print("""List""", integers, strings) print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
91
0
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__) def UpperCAmelCase ( a_=None , a_=None ) -> Any: """simple docstring""" return field(default_factory=lambda: default , metadata=a_ ) @dataclass class UpperCAmelCase : '''simple docstring''' snake_case_ = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) snake_case_ = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) snake_case_ = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) snake_case_ = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) snake_case_ = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) snake_case_ = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) snake_case_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Use FP16 to accelerate inference."} ) snake_case_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Benchmark training of model"} ) snake_case_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Verbose memory tracing"} ) snake_case_ = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) snake_case_ = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) snake_case_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Trace memory line by line"} ) snake_case_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Save result to a CSV file"} ) snake_case_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Save all print statements in a log file"} ) snake_case_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to print environment information"} ) snake_case_ = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) snake_case_ = field( default=F"""inference_time_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving time results to csv."} , ) snake_case_ = field( default=F"""inference_memory_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv."} , ) snake_case_ = field( default=F"""train_time_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) snake_case_ = field( default=F"""train_memory_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) snake_case_ = field( default=F"""env_info_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving environment information."} , ) snake_case_ = field( default=F"""log_{round(time() )}.csv""" , metadata={"help": "Log filename used if print statements are saved in log."} , ) snake_case_ = field(default=3 , metadata={"help": "Times an experiment will be run."} ) snake_case_ = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def UpperCamelCase_ ( self : List[str] ): warnings.warn( f'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils''' " are deprecated in general and it is advised to use external Benchmarking libraries " " to benchmark Transformer models." ,A ,) def UpperCamelCase_ ( self : Tuple ): return json.dumps(dataclasses.asdict(self ) ,indent=2 ) @property def UpperCamelCase_ ( self : Union[str, Any] ): if len(self.models ) <= 0: raise ValueError( "Please make sure you provide at least one model name / model identifier, *e.g.* `--models" " bert-base-cased` or `args.models = ['bert-base-cased']." ) return self.models @property def UpperCamelCase_ ( self : List[str] ): if not self.multi_process: return False elif self.is_tpu: logger.info("Multiprocessing is currently not possible on TPU." ) return False else: return True
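The list_field helper at the top of this file exists because dataclasses reject mutable defaults; a small standalone sketch of the same pattern (the names here are illustrative, not part of the benchmark module):

from dataclasses import dataclass, field
from typing import List


def list_field(default=None, metadata=None):
    # route the mutable default through default_factory so each instance gets a fresh list
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class ExampleArgs:
    batch_sizes: List[int] = list_field(default=[8], metadata={"help": "batch sizes to benchmark"})


print(ExampleArgs().batch_sizes)  # [8]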
15
"""simple docstring""" import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def _A (__a , __a , __a ) -> Dict: """simple docstring""" if gpta_config_file == "": SCREAMING_SNAKE_CASE_ : Optional[Any] = GPTaConfig() else: SCREAMING_SNAKE_CASE_ : Tuple = GPTaConfig.from_json_file(__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = GPTaModel(__a ) # Load weights from numpy load_tf_weights_in_gpta(__a , __a , __a ) # Save pytorch-model SCREAMING_SNAKE_CASE_ : List[str] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME SCREAMING_SNAKE_CASE_ : List[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(f'Save PyTorch model to {pytorch_weights_dump_path}' ) torch.save(model.state_dict() , __a ) print(f'Save configuration file to {pytorch_config_dump_path}' ) with open(__a , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCAmelCase_ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--gpt2_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained OpenAI model. \n""" """This specifies the model architecture.""" ), ) UpperCAmelCase_ : Union[str, Any] = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
91
0
"""simple docstring""" import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]: # Initialise PyTorch model lowercase__ : List[Any] = RemBertConfig.from_json_file(__lowerCamelCase ) print('''Building PyTorch model from configuration: {}'''.format(str(__lowerCamelCase ) ) ) lowercase__ : Dict = RemBertModel(__lowerCamelCase ) # Load weights from tf checkpoint load_tf_weights_in_rembert(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Save pytorch-model print('''Save PyTorch model to {}'''.format(__lowerCamelCase ) ) torch.save(model.state_dict() , __lowerCamelCase ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--rembert_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained RemBERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) lowerCAmelCase_ = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
16
"""simple docstring""" from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
91
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class _lowerCAmelCase : """simple docstring""" def __init__( self : List[str], UpperCAmelCase__ : Tuple, ): __lowercase = parent __lowercase = 1_3 __lowercase = 7 __lowercase = True __lowercase = True __lowercase = False __lowercase = True __lowercase = 9_9 __lowercase = 3_2 __lowercase = 2 __lowercase = 4 __lowercase = 3_7 __lowercase = "gelu" __lowercase = 0.1 __lowercase = 0.1 __lowercase = 5_1_2 __lowercase = 1_6 __lowercase = 2 __lowercase = 0.02 __lowercase = 3 __lowercase = 4 __lowercase = None def _lowercase ( self : int ): __lowercase = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) __lowercase = ids_tensor([self.batch_size], self.num_choices ) __lowercase = DistilBertConfig( vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : str, UpperCAmelCase__ : Dict, UpperCAmelCase__ : int, UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[int] ): __lowercase = TFDistilBertModel(config=UpperCAmelCase__ ) __lowercase = {"input_ids": input_ids, "attention_mask": input_mask} __lowercase = model(UpperCAmelCase__ ) __lowercase = [input_ids, input_mask] __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Tuple, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : List[str] ): __lowercase = TFDistilBertForMaskedLM(config=UpperCAmelCase__ ) __lowercase = {"input_ids": input_ids, "attention_mask": input_mask} __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self : int, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any ): __lowercase = TFDistilBertForQuestionAnswering(config=UpperCAmelCase__ ) __lowercase = 
{ "input_ids": input_ids, "attention_mask": input_mask, } __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def _lowercase ( self : Any, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Any ): __lowercase = self.num_labels __lowercase = TFDistilBertForSequenceClassification(UpperCAmelCase__ ) __lowercase = {"input_ids": input_ids, "attention_mask": input_mask} __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def _lowercase ( self : Optional[int], UpperCAmelCase__ : str, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Dict, UpperCAmelCase__ : int, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Tuple ): __lowercase = self.num_choices __lowercase = TFDistilBertForMultipleChoice(UpperCAmelCase__ ) __lowercase = tf.tile(tf.expand_dims(UpperCAmelCase__, 1 ), (1, self.num_choices, 1) ) __lowercase = tf.tile(tf.expand_dims(UpperCAmelCase__, 1 ), (1, self.num_choices, 1) ) __lowercase = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, } __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def _lowercase ( self : str, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : str, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : int, UpperCAmelCase__ : List[Any] ): __lowercase = self.num_labels __lowercase = TFDistilBertForTokenClassification(UpperCAmelCase__ ) __lowercase = {"input_ids": input_ids, "attention_mask": input_mask} __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self : Dict ): __lowercase = self.prepare_config_and_inputs() ((__lowercase) ,(__lowercase) ,(__lowercase) ,(__lowercase) ,(__lowercase) ,(__lowercase)) = config_and_inputs __lowercase = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _lowerCAmelCase ( lowercase ,lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : int = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) __UpperCAmelCase : Union[str, Any] = ( { "feature-extraction": TFDistilBertModel, "fill-mask": TFDistilBertForMaskedLM, "question-answering": TFDistilBertForQuestionAnswering, "text-classification": TFDistilBertForSequenceClassification, "token-classification": TFDistilBertForTokenClassification, "zero-shot": TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) __UpperCAmelCase : str = False __UpperCAmelCase : List[str] = False def _lowercase ( self : Optional[int] ): __lowercase = TFDistilBertModelTester(self ) __lowercase = ConfigTester(self, config_class=UpperCAmelCase__, dim=3_7 ) def _lowercase ( self : str ): self.config_tester.run_common_tests() def _lowercase ( self : Optional[int] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase__ ) def _lowercase ( self : Dict ): __lowercase = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase__ ) def _lowercase ( self : List[Any] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase__ ) def _lowercase ( self : Dict ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase__ ) def _lowercase ( self : List[str] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase__ ) def _lowercase ( self : List[str] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase__ ) @slow def _lowercase ( self : str ): for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): __lowercase = TFDistilBertModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @require_tf class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def _lowercase ( self : Union[str, Any] ): __lowercase = TFDistilBertModel.from_pretrained("distilbert-base-uncased" ) __lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] ) __lowercase = model(UpperCAmelCase__ )[0] __lowercase = [1, 6, 7_6_8] self.assertEqual(output.shape, UpperCAmelCase__ ) __lowercase = tf.constant( [ [ [0.19_261_885, -0.13_732_955, 0.4_119_799], [0.22_150_156, -0.07_422_661, 0.39_037_204], [0.22_756_018, -0.0_896_414, 0.3_701_467], ] ] ) tf.debugging.assert_near(output[:, :3, :3], UpperCAmelCase__, atol=1E-4 )
17
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL UpperCAmelCase_ : int = logging.get_logger(__name__) def _A (__a ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(__a , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__a , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__a ): return [[videos]] raise ValueError(f'Could not make batched video from {videos}' ) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["pixel_values"] def __init__( self : Dict , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : Dict , ): '''simple docstring''' super().__init__(**lowercase_) SCREAMING_SNAKE_CASE_ : str = size if size is not None else {'''shortest_edge''': 256} SCREAMING_SNAKE_CASE_ : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(lowercase_ , param_name='''crop_size''') SCREAMING_SNAKE_CASE_ : Optional[int] = do_resize SCREAMING_SNAKE_CASE_ : List[Any] = size SCREAMING_SNAKE_CASE_ : Tuple = do_center_crop SCREAMING_SNAKE_CASE_ : Dict = crop_size SCREAMING_SNAKE_CASE_ : List[Any] = resample SCREAMING_SNAKE_CASE_ : List[str] = do_rescale SCREAMING_SNAKE_CASE_ : List[str] = rescale_factor SCREAMING_SNAKE_CASE_ : List[Any] = offset SCREAMING_SNAKE_CASE_ : List[Any] = do_normalize SCREAMING_SNAKE_CASE_ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN SCREAMING_SNAKE_CASE_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_) if "shortest_edge" in size: SCREAMING_SNAKE_CASE_ : List[Any] = get_resize_output_image_size(lowercase_ , size['''shortest_edge'''] , default_to_square=lowercase_) elif "height" in size and "width" in size: SCREAMING_SNAKE_CASE_ : Union[str, Any] = (size['''height'''], size['''width''']) else: raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}') return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = get_size_dict(lowercase_) if "height" not in size or "width" not in size: raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}') return center_crop(lowercase_ , size=(size['''height'''], size['''width''']) , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : bool = True , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[int] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = image.astype(np.floataa) if offset: SCREAMING_SNAKE_CASE_ : Tuple = image - (scale / 2) return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[str] , ): '''simple docstring''' return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ): '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''') if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''') if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''') # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE_ : List[str] = to_numpy_array(lowercase_) if do_resize: SCREAMING_SNAKE_CASE_ : List[Any] = self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) if do_center_crop: SCREAMING_SNAKE_CASE_ : Dict = self.center_crop(lowercase_ , size=lowercase_) if do_rescale: SCREAMING_SNAKE_CASE_ : int = self.rescale(image=lowercase_ , scale=lowercase_ , offset=lowercase_) if do_normalize: SCREAMING_SNAKE_CASE_ : Dict = self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = to_channel_dimension_format(lowercase_ , lowercase_) return image def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Optional[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE_ : int = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE_ : Dict = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE_ : Dict = offset if offset is not None else self.offset SCREAMING_SNAKE_CASE_ : str = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE_ : Dict = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE_ : List[str] = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else self.size SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : Any = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(lowercase_ , param_name='''crop_size''') if not valid_images(lowercase_): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') SCREAMING_SNAKE_CASE_ : Tuple = make_batched(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = [ [ self._preprocess_image( image=lowercase_ , do_resize=lowercase_ , size=lowercase_ , resample=lowercase_ , do_center_crop=lowercase_ , crop_size=lowercase_ , do_rescale=lowercase_ , rescale_factor=lowercase_ , offset=lowercase_ , do_normalize=lowercase_ , image_mean=lowercase_ , image_std=lowercase_ , data_format=lowercase_ , ) for img in video ] for video in videos ] SCREAMING_SNAKE_CASE_ : int = {'''pixel_values''': videos} return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
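A small sketch of what the batching helper defined at the top of this file does, assuming it is in scope under the make_batched name used inside preprocess: a single frame, a clip, and a batch of clips are all normalised to a list of clips. The numpy frames are illustrative placeholders:

import numpy as np

frame = np.zeros((32, 32, 3), dtype=np.uint8)

print(len(make_batched(frame)))                      # 1 -> wrapped as [[frame]]
print(len(make_batched([frame, frame])))             # 1 -> wrapped as [[frame, frame]]
print(len(make_batched([[frame, frame], [frame]])))  # 2 -> already a batch of clips, returned unchanged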
91
0
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class a__ ( unittest.TestCase ): def __UpperCamelCase ( self : int,_A : List[Any] ): """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"],model_result["ss"] ): SCREAMING_SNAKE_CASE_ : Any = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(_A ) def __UpperCamelCase ( self : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = "sshleifer/tiny-gpt2" SCREAMING_SNAKE_CASE_ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],eager_mode=_A,multi_process=_A,) SCREAMING_SNAKE_CASE_ : List[str] = TensorFlowBenchmark(_A ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCamelCase ( self : Any ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = "sgugger/tiny-distilbert-classification" SCREAMING_SNAKE_CASE_ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],multi_process=_A,only_pretrain_model=_A,) SCREAMING_SNAKE_CASE_ : Optional[Any] = TensorFlowBenchmark(_A ) SCREAMING_SNAKE_CASE_ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCamelCase ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = "sshleifer/tiny-gpt2" SCREAMING_SNAKE_CASE_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],multi_process=_A,) SCREAMING_SNAKE_CASE_ : Any = TensorFlowBenchmark(_A ) SCREAMING_SNAKE_CASE_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCamelCase ( self : Any ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = "sshleifer/tiny-gpt2" SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoConfig.from_pretrained(_A ) SCREAMING_SNAKE_CASE_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],eager_mode=_A,multi_process=_A,) SCREAMING_SNAKE_CASE_ : Tuple = TensorFlowBenchmark(_A,[config] ) SCREAMING_SNAKE_CASE_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCamelCase ( self : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = "sshleifer/tiny-gpt2" SCREAMING_SNAKE_CASE_ : List[Any] = AutoConfig.from_pretrained(_A ) SCREAMING_SNAKE_CASE_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],multi_process=_A,) SCREAMING_SNAKE_CASE_ : Any = TensorFlowBenchmark(_A,[config] ) SCREAMING_SNAKE_CASE_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCamelCase ( self : List[str] ): 
"""simple docstring""" SCREAMING_SNAKE_CASE_ : Any = "sshleifer/tiny-gpt2" SCREAMING_SNAKE_CASE_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],multi_process=_A,) SCREAMING_SNAKE_CASE_ : Dict = TensorFlowBenchmark(_A ) SCREAMING_SNAKE_CASE_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __UpperCamelCase ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = "sshleifer/tiny-gpt2" SCREAMING_SNAKE_CASE_ : Any = AutoConfig.from_pretrained(_A ) SCREAMING_SNAKE_CASE_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],multi_process=_A,) SCREAMING_SNAKE_CASE_ : Optional[Any] = TensorFlowBenchmark(_A,[config] ) SCREAMING_SNAKE_CASE_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __UpperCamelCase ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = "patrickvonplaten/t5-tiny-random" SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoConfig.from_pretrained(_A ) SCREAMING_SNAKE_CASE_ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],multi_process=_A,) SCREAMING_SNAKE_CASE_ : List[str] = TensorFlowBenchmark(_A,configs=[config] ) SCREAMING_SNAKE_CASE_ : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0,"Cannot do xla on CPU." 
) def __UpperCamelCase ( self : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = "sshleifer/tiny-gpt2" SCREAMING_SNAKE_CASE_ : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],use_xla=_A,multi_process=_A,) SCREAMING_SNAKE_CASE_ : Union[str, Any] = TensorFlowBenchmark(_A ) SCREAMING_SNAKE_CASE_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __UpperCamelCase ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE_ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID],inference=_A,save_to_csv=_A,sequence_lengths=[8],batch_sizes=[1],inference_time_csv_file=os.path.join(_A,"inf_time.csv" ),inference_memory_csv_file=os.path.join(_A,"inf_mem.csv" ),env_info_csv_file=os.path.join(_A,"env.csv" ),multi_process=_A,) SCREAMING_SNAKE_CASE_ : Dict = TensorFlowBenchmark(_A ) benchmark.run() self.assertTrue(Path(os.path.join(_A,"inf_time.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(_A,"inf_mem.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(_A,"env.csv" ) ).exists() ) def __UpperCamelCase ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(_A : Optional[Any] ): self.assertTrue(hasattr(_A,"sequential" ) ) self.assertTrue(hasattr(_A,"cumulative" ) ) self.assertTrue(hasattr(_A,"current" ) ) self.assertTrue(hasattr(_A,"total" ) ) with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID],inference=_A,sequence_lengths=[8],batch_sizes=[1],log_filename=os.path.join(_A,"log.txt" ),log_print=_A,trace_memory_line_by_line=_A,eager_mode=_A,multi_process=_A,) SCREAMING_SNAKE_CASE_ : Tuple = TensorFlowBenchmark(_A ) SCREAMING_SNAKE_CASE_ : int = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(_A,"log.txt" ) ).exists() )
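The tests above all follow the same pattern; a standalone sketch of that pattern (the argument names come from the tests, the values are assumptions; these benchmark utilities are deprecated but still runnable):

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()
print(results.time_inference_result)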
18
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase_ : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCAmelCase_ : Dict = { """vocab_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""", }, """merges_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""", }, """tokenizer_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""", }, } UpperCAmelCase_ : List[str] = { """gpt2""": 1024, """gpt2-medium""": 1024, """gpt2-large""": 1024, """gpt2-xl""": 1024, """distilgpt2""": 1024, } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ["input_ids", "attention_mask"] __UpperCamelCase = GPTaTokenizer def __init__( self : Optional[int] , lowercase_ : int=None , lowercase_ : List[str]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Tuple="<|endoftext|>" , lowercase_ : str="<|endoftext|>" , lowercase_ : Dict="<|endoftext|>" , lowercase_ : Tuple=False , **lowercase_ : Optional[int] , ): '''simple docstring''' super().__init__( lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('''add_bos_token''' , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('''add_prefix_space''' , lowercase_) != add_prefix_space: SCREAMING_SNAKE_CASE_ : int = getattr(lowercase_ , pre_tok_state.pop('''type''')) SCREAMING_SNAKE_CASE_ : str = add_prefix_space SCREAMING_SNAKE_CASE_ : Dict = pre_tok_class(**lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = add_prefix_space def _SCREAMING_SNAKE_CASE ( self : str , *lowercase_ : List[Any] , **lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = 
kwargs.get('''is_split_into_words''' , lowercase_) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *lowercase_ : List[str] , **lowercase_ : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = kwargs.get('''is_split_into_words''' , lowercase_) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , lowercase_ : Optional[str] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self._tokenizer.model.save(lowercase_ , name=lowercase_) return tuple(lowercase_) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : "Conversation"): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_) + [self.eos_token_id]) if len(lowercase_) > self.model_max_length: SCREAMING_SNAKE_CASE_ : Any = input_ids[-self.model_max_length :] return input_ids
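The assertions in _batch_encode_plus and _encode_plus above mean pretokenized input only works when the tokenizer was built with add_prefix_space=True; a short sketch:

from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
enc = tok(["Hello", "world"], is_split_into_words=True)  # fails the assertion without add_prefix_space=True
print(enc.input_ids)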
91
0
def lowerCamelCase_(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
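The function implements c = sqrt(K / rho). A quick worked example with approximate textbook values for water (an assumption, not taken from this file):

bulk_modulus = 2.2e9   # Pa, approximate bulk modulus of water
density = 1.0e3        # kg/m^3
print((bulk_modulus / density) ** 0.5)  # ~1483 m/s, close to the measured speed of sound in water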
19
"""simple docstring""" import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(lowercase_ , '''hidden_sizes''')) self.parent.assertTrue(hasattr(lowercase_ , '''num_attention_heads''')) class lowerCAmelCase__ : '''simple docstring''' def __init__( self : str , lowercase_ : Union[str, Any] , lowercase_ : List[Any]=13 , lowercase_ : Dict=64 , lowercase_ : Dict=3 , lowercase_ : Optional[Any]=3 , lowercase_ : List[Any]=2 , lowercase_ : Any=1 , lowercase_ : List[Any]=16 , lowercase_ : int=[128, 256, 384] , lowercase_ : str=[4, 6, 8] , lowercase_ : Optional[Any]=[2, 3, 4] , lowercase_ : Union[str, Any]=[16, 16, 16] , lowercase_ : Optional[Any]=0 , lowercase_ : Optional[int]=[2, 2, 2] , lowercase_ : Any=[2, 2, 2] , lowercase_ : List[str]=0.02 , lowercase_ : Any=True , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[int]=2 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = parent SCREAMING_SNAKE_CASE_ : Any = batch_size SCREAMING_SNAKE_CASE_ : Optional[Any] = image_size SCREAMING_SNAKE_CASE_ : int = num_channels SCREAMING_SNAKE_CASE_ : List[Any] = kernel_size SCREAMING_SNAKE_CASE_ : Optional[Any] = stride SCREAMING_SNAKE_CASE_ : List[str] = padding SCREAMING_SNAKE_CASE_ : int = hidden_sizes SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads SCREAMING_SNAKE_CASE_ : int = depths SCREAMING_SNAKE_CASE_ : Optional[Any] = key_dim SCREAMING_SNAKE_CASE_ : Optional[Any] = drop_path_rate SCREAMING_SNAKE_CASE_ : Tuple = patch_size SCREAMING_SNAKE_CASE_ : Optional[Any] = attention_ratio SCREAMING_SNAKE_CASE_ : str = mlp_ratio SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range SCREAMING_SNAKE_CASE_ : List[Any] = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] SCREAMING_SNAKE_CASE_ : Any = is_training SCREAMING_SNAKE_CASE_ : Tuple = use_labels SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_labels SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_ : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , self.num_labels) SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : 
Optional[int]): '''simple docstring''' return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : int , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = LevitModel(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(lowercase_) SCREAMING_SNAKE_CASE_ : Any = (self.image_size, self.image_size) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = image_size[0], image_size[1] for _ in range(4): SCREAMING_SNAKE_CASE_ : List[Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1) SCREAMING_SNAKE_CASE_ : Dict = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]) , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.num_labels SCREAMING_SNAKE_CASE_ : Union[str, Any] = LevitForImageClassification(lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE_ : int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) __UpperCamelCase = ( { "feature-extraction": LevitModel, "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = LevitModelTester(self) SCREAMING_SNAKE_CASE_ : List[Any] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' return 
@unittest.skip(reason='''Levit does not use inputs_embeds''') def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' pass @unittest.skip(reason='''Levit does not support input and output embeddings''') def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' pass @unittest.skip(reason='''Levit does not output attentions''') def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Any = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ : Dict = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowercase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' def check_hidden_states_output(lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : str): SCREAMING_SNAKE_CASE_ : str = model_class(lowercase_) model.to(lowercase_) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Tuple = model(**self._prepare_for_class(lowercase_ , lowercase_)) SCREAMING_SNAKE_CASE_ : str = outputs.hidden_states SCREAMING_SNAKE_CASE_ : Optional[int] = len(self.model_tester.depths) + 1 self.assertEqual(len(lowercase_) , lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = (self.model_tester.image_size, self.model_tester.image_size) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_size[0], image_size[1] for _ in range(4): SCREAMING_SNAKE_CASE_ : Optional[Any] = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1) SCREAMING_SNAKE_CASE_ : Optional[int] = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [ height * width, self.model_tester.hidden_sizes[0], ] , ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Optional[int] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE_ : Tuple = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''') def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Tuple=False): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple 
docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : Union[str, Any] = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(lowercase_) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowercase_) model.to(lowercase_) model.train() SCREAMING_SNAKE_CASE_ : Optional[Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = model(**lowercase_).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE_ : Union[str, Any] = False SCREAMING_SNAKE_CASE_ : Optional[int] = True for model_class in self.all_model_classes: if model_class in get_values(lowercase_) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue SCREAMING_SNAKE_CASE_ : List[str] = model_class(lowercase_) model.gradient_checkpointing_enable() model.to(lowercase_) model.train() SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = model(**lowercase_).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : List[Any] = [ {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float}, {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long}, {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(lowercase_), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}'): SCREAMING_SNAKE_CASE_ : Optional[int] = problem_type['''title'''] SCREAMING_SNAKE_CASE_ : Optional[int] = problem_type['''num_labels'''] SCREAMING_SNAKE_CASE_ : str = model_class(lowercase_) model.to(lowercase_) model.train() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) if problem_type["num_labels"] > 1: SCREAMING_SNAKE_CASE_ : str = inputs['''labels'''].unsqueeze(1).repeat(1 , problem_type['''num_labels''']) SCREAMING_SNAKE_CASE_ : Any = inputs['''labels'''].to(problem_type['''dtype''']) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=lowercase_) as warning_list: SCREAMING_SNAKE_CASE_ : int = model(**lowercase_).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message): raise ValueError( F'Something is going wrong in the regression problem: intercepted {w.message}') loss.backward() @slow def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Optional[Any] = LevitModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) def _A () -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) @slow def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to( lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.default_image_processor SCREAMING_SNAKE_CASE_ : str = prepare_img() SCREAMING_SNAKE_CASE_ : List[Any] = image_processor(images=lowercase_ , return_tensors='''pt''').to(lowercase_) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Any = model(**lowercase_) # verify the logits SCREAMING_SNAKE_CASE_ : Tuple = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape , lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([1.04_48, -0.37_45, -1.83_17]).to(lowercase_) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
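The tests above repeatedly apply the convolution output-size formula floor((n + 2 * padding - kernel_size) / stride) + 1 over four stride-2 convolutions; a tiny sketch of that arithmetic at LeViT's usual 224-pixel input (the 64-pixel value used by the model tester works the same way):

from math import floor


def conv_output_size(n, kernel_size=3, stride=2, padding=1):
    return floor((n + 2 * padding - kernel_size) / stride) + 1


size = 224
for _ in range(4):  # four stride-2 convolutions in the patch-embedding stem
    size = conv_output_size(size)
print(size, size * size)  # 14, 196 patches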
style_context_codestyle: 91
label: 0
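# A minimal inference sketch mirroring the LeViT integration test above. The
# "facebook/levit-128S" checkpoint name is an assumption (any LeViT checkpoint from the
# Hub should work), and the blank image is only a placeholder for a real photo.
from PIL import Image
import torch
from transformers import LevitImageProcessor, LevitForImageClassificationWithTeacher

image = Image.new("RGB", (224, 224))
image_processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
model = LevitForImageClassificationWithTeacher.from_pretrained("facebook/levit-128S")
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # (1, 1000) ImageNet classes, the same shape checked by the test above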
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase : Optional[Any] = logging.get_logger(__name__) lowercase : Optional[int] = { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""", """umberto-commoncrawl-cased-v1""": ( """https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json""" ), """umberto-wikipedia-uncased-v1""": ( """https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json""" ), } class __snake_case ( lowerCAmelCase ): _a : Any= "camembert" def __init__( self ,snake_case=30522 ,snake_case=768 ,snake_case=12 ,snake_case=12 ,snake_case=3072 ,snake_case="gelu" ,snake_case=0.1 ,snake_case=0.1 ,snake_case=512 ,snake_case=2 ,snake_case=0.02 ,snake_case=1e-12 ,snake_case=1 ,snake_case=0 ,snake_case=2 ,snake_case="absolute" ,snake_case=True ,snake_case=None ,**snake_case ,): '''simple docstring''' super().__init__(pad_token_id=snake_case ,bos_token_id=snake_case ,eos_token_id=snake_case ,**snake_case ) lowercase : List[Any] = vocab_size lowercase : Tuple = hidden_size lowercase : Union[str, Any] = num_hidden_layers lowercase : List[str] = num_attention_heads lowercase : Optional[Any] = hidden_act lowercase : Tuple = intermediate_size lowercase : Any = hidden_dropout_prob lowercase : List[str] = attention_probs_dropout_prob lowercase : Dict = max_position_embeddings lowercase : Tuple = type_vocab_size lowercase : Union[str, Any] = initializer_range lowercase : Tuple = layer_norm_eps lowercase : Dict = position_embedding_type lowercase : Union[str, Any] = use_cache lowercase : Optional[int] = classifier_dropout class __snake_case ( lowerCAmelCase ): @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' if self.task == "multiple-choice": lowercase : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowercase : Union[str, Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
code_codestyle: 20
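# A short usage sketch for the configuration class above: instantiate it directly and
# read back a few fields. The argument values here are illustrative only.
from transformers import CamembertConfig

config = CamembertConfig(vocab_size=32005, hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
print(config.model_type)    # "camembert"
print(config.hidden_size)   # 768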
"""simple docstring""" from math import factorial def _A (__a = 20 ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... SCREAMING_SNAKE_CASE_ : List[str] = n // 2 return int(factorial(__a ) / (factorial(__a ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(20)) else: try: UpperCAmelCase_ : List[str] = int(sys.argv[1]) print(solution(n)) except ValueError: print("""Invalid entry - please enter a number.""")
style_context_codestyle: 91
label: 0
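# Cross-check of the closed form used above: the middle entry of row 2n of Pascal's
# triangle is the central binomial coefficient C(2n, n) (standard library only).
from math import comb, factorial

def central_binomial(n: int) -> int:
    return factorial(2 * n) // (factorial(n) * factorial(n))

for n in (1, 5, 20):
    assert central_binomial(n) == comb(2 * n, n)
print(central_binomial(20))  # 137846528820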
from ...processing_utils import ProcessorMixin class _lowerCamelCase( _a ): lowercase_ : List[Any] = """WhisperFeatureExtractor""" lowercase_ : List[str] = """WhisperTokenizer""" def __init__( self, lowerCamelCase, lowerCamelCase) -> Dict: """simple docstring""" super().__init__(lowerCamelCase, lowerCamelCase) _lowercase : Dict = self.feature_extractor _lowercase : int = False def UpperCamelCase ( self, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=True) -> List[str]: """simple docstring""" return self.tokenizer.get_decoder_prompt_ids(task=lowerCamelCase, language=lowerCamelCase, no_timestamps=lowerCamelCase) def __call__( self, *lowerCamelCase, **lowerCamelCase) -> Dict: """simple docstring""" if self._in_target_context_manager: return self.current_processor(*lowerCamelCase, **lowerCamelCase) _lowercase : Any = kwargs.pop('audio', lowerCamelCase) _lowercase : Union[str, Any] = kwargs.pop('sampling_rate', lowerCamelCase) _lowercase : List[str] = kwargs.pop('text', lowerCamelCase) if len(lowerCamelCase) > 0: _lowercase : int = args[0] _lowercase : Union[str, Any] = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.') if audio is not None: _lowercase : Dict = self.feature_extractor(lowerCamelCase, *lowerCamelCase, sampling_rate=lowerCamelCase, **lowerCamelCase) if text is not None: _lowercase : Dict = self.tokenizer(lowerCamelCase, **lowerCamelCase) if text is None: return inputs elif audio is None: return encodings else: _lowercase : Union[str, Any] = encodings['input_ids'] return inputs def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> Optional[int]: """simple docstring""" return self.tokenizer.batch_decode(*lowerCamelCase, **lowerCamelCase) def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> Tuple: """simple docstring""" return self.tokenizer.decode(*lowerCamelCase, **lowerCamelCase) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase="np") -> str: """simple docstring""" return self.tokenizer.get_prompt_ids(lowerCamelCase, return_tensors=lowerCamelCase)
code_codestyle: 21
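# Hedged usage sketch for the processor above, assuming the "openai/whisper-tiny"
# checkpoint is reachable; one second of silence stands in for a real 16 kHz waveform.
import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
audio = np.zeros(16_000, dtype=np.float32)
inputs = processor(audio=audio, sampling_rate=16_000, return_tensors="pt")
print(inputs.input_features.shape)  # log-mel features, (1, 80, 3000) for this checkpoint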
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor UpperCAmelCase_ : Any = logging.get_logger(__name__) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Union[str, Any] , *lowercase_ : List[str] , **lowercase_ : List[str]): '''simple docstring''' warnings.warn( '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use SegformerImageProcessor instead.''' , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_)
style_context_codestyle: 91
label: 0
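# The shim above exists only for backwards compatibility; new code is expected to use
# the image processor class directly (minimal sketch, default arguments assumed valid).
from transformers import SegformerImageProcessor

image_processor = SegformerImageProcessor()  # drop-in replacement for SegformerFeatureExtractor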
'''simple docstring''' import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument __SCREAMING_SNAKE_CASE :Any = { '''/attention/''': '''/0/SelfAttention/''', '''/self_attention/''': '''/0/SelfAttention/''', '''/encoder_decoder_attention/''': '''/1/EncDecAttention/''', '''value''': '''v''', '''query''': '''q''', '''key''': '''k''', '''out''': '''o''', '''pre_self_attention_layer_norm''': '''0/layer_norm''', '''pre_cross_attention_layer_norm''': '''1/layer_norm''', '''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong '''token_embedder''': '''shared''', '''encoder_norm''': '''final_layer_norm''', '''decoder_norm''': '''final_layer_norm''', '''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''', '''router/router_weights/w/''': '''router/classifier/''', '''roer/roer_weights/w/''': '''router/classifier/''', '''logits_dense''': '''lm_head''', } def UpperCAmelCase_ ( __lowercase : Dict ) -> Tuple: '''simple docstring''' _UpperCAmelCase = list(s_dict.keys() ) for key in keys: _UpperCAmelCase = r".*/layers_(\d+)" _UpperCAmelCase = key if re.match(__lowercase , __lowercase ): _UpperCAmelCase = re.sub(r"layers_(\d+)" , r"block/\1/layer" , __lowercase ) _UpperCAmelCase = r"(encoder|decoder)\/" if re.match(__lowercase , __lowercase ): _UpperCAmelCase = re.match(__lowercase , __lowercase ).groups() if groups[0] == "encoder": _UpperCAmelCase = re.sub(r"/mlp/" , r"/1/mlp/" , __lowercase ) _UpperCAmelCase = re.sub(r"/pre_mlp_layer_norm/" , r"/1/layer_norm/" , __lowercase ) elif groups[0] == "decoder": _UpperCAmelCase = re.sub(r"/mlp/" , r"/2/mlp/" , __lowercase ) _UpperCAmelCase = re.sub(r"/pre_mlp_layer_norm/" , r"/2/layer_norm/" , __lowercase ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: _UpperCAmelCase = new_key.replace(__lowercase , __lowercase ) print(f'{key} -> {new_key}' ) _UpperCAmelCase = s_dict.pop(__lowercase ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: _UpperCAmelCase = s_dict[ "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: _UpperCAmelCase = s_dict[ "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: _UpperCAmelCase = s_dict[key].shape[0] _UpperCAmelCase = s_dict[key] for idx in range(__lowercase ): _UpperCAmelCase = expert_weihts[idx] print(f'{key} -> {key.replace("expert/" , "nested fstring" )}' ) s_dict.pop(__lowercase ) return s_dict __SCREAMING_SNAKE_CASE :Any = { '''NUM_ENCODER_LAYERS''': '''num_layers''', '''NUM_DECODER_LAYERS''': '''num_decoder_layers''', '''NUM_HEADS''': '''num_heads''', '''HEAD_DIM''': '''d_kv''', '''EMBED_DIM''': '''d_model''', '''MLP_DIM''': '''d_ff''', '''NUM_SELECTED_EXPERTS''': '''num_selected_experts''', '''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''', '''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''', '''dense.MlpBlock.activations''': '''feed_forward_proj''', } def UpperCAmelCase_ ( __lowercase : Tuple , __lowercase : List[str] ) -> Dict: '''simple docstring''' import regex as re with open(__lowercase , "r" ) as f: _UpperCAmelCase = f.read() _UpperCAmelCase = re.findall(r"(.*) = ([0-9.]*)" , __lowercase ) _UpperCAmelCase = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": _UpperCAmelCase = float(__lowercase ) if "." in value else int(__lowercase ) _UpperCAmelCase = re.findall(r"(.*activations) = \(\'(.*)\',\)" , __lowercase )[0] _UpperCAmelCase = str(activation[1] ) _UpperCAmelCase = num_experts _UpperCAmelCase = SwitchTransformersConfig(**__lowercase ) return config def UpperCAmelCase_ ( __lowercase : Tuple , __lowercase : List[str] , __lowercase : str=None , __lowercase : List[str]="./" , __lowercase : Optional[int]=8 ) -> Optional[int]: '''simple docstring''' print(f'Loading flax weights from : {flax_checkpoint_path}' ) _UpperCAmelCase = checkpoints.load_tax_checkpoint(__lowercase ) if gin_file is not None: _UpperCAmelCase = convert_gin_to_config(__lowercase , __lowercase ) else: _UpperCAmelCase = SwitchTransformersConfig.from_pretrained(__lowercase ) _UpperCAmelCase = SwitchTransformersForConditionalGeneration(__lowercase ) _UpperCAmelCase = flax_params["target"] _UpperCAmelCase = flatten_dict(__lowercase , sep="/" ) _UpperCAmelCase = rename_keys(__lowercase ) _UpperCAmelCase = unflatten_dict(__lowercase , sep="/" ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(__lowercase , __lowercase ) print(f'Save PyTorch model to {pytorch_dump_path}' ) pt_model.save_pretrained(__lowercase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--switch_t5x_checkpoint_path''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the''' ''' model architecture. If not provided, a `gin_file` has to be provided.''' ), ) parser.add_argument( '''--gin_file''', default=None, type=str, required=False, help='''Path to the gin config file. 
If not provided, a `config_file` has to be passed ''', ) parser.add_argument( '''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.''' ) parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''') __SCREAMING_SNAKE_CASE :Tuple = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
code_codestyle: 22
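# Standalone sketch of the key-renaming idea in the conversion script above: T5X-style
# parameter names such as "encoder/layers_3/mlp/wi/kernel" are rewritten into the
# block/layer layout expected on the PyTorch side. The function name is mine; the
# regexes follow the ones used in the script.
import re

def rename_t5x_key(key: str) -> str:
    key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)
    if key.startswith("encoder/"):
        key = re.sub(r"/mlp/", "/1/mlp/", key)
        key = re.sub(r"/pre_mlp_layer_norm/", "/1/layer_norm/", key)
    elif key.startswith("decoder/"):
        key = re.sub(r"/mlp/", "/2/mlp/", key)
        key = re.sub(r"/pre_mlp_layer_norm/", "/2/layer_norm/", key)
    return key

print(rename_t5x_key("encoder/layers_3/mlp/wi/kernel"))
# encoder/block/3/layer/1/mlp/wi/kernel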
"""simple docstring""" from __future__ import annotations class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , lowercase_ : int = 0): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = key def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(lowercase_) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(lowercase_) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : int = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned SCREAMING_SNAKE_CASE_ : List[str] = '''''' for ch in content: ans += chr(ord(lowercase_) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned SCREAMING_SNAKE_CASE_ : List[Any] = '''''' for ch in content: ans += chr(ord(lowercase_) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) try: with open(lowercase_) as fin, open('''encrypt.out''' , '''w+''') as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(lowercase_ , lowercase_)) except OSError: return False return True def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) try: with open(lowercase_) as fin, open('''decrypt.out''' , '''w+''') as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(lowercase_ , lowercase_)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
style_context_codestyle: 91
label: 0
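# The commented-out tests at the end of the cipher above rely on XOR being its own
# inverse; a self-contained sketch of the same round trip:
def xor_string(content: str, key: int) -> str:
    key %= 255
    return "".join(chr(ord(ch) ^ key) for ch in content)

message = "hallo welt"
encrypted = xor_string(message, 67)
assert xor_string(encrypted, 67) == message  # applying the same key twice restores the text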
'''simple docstring''' import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase__: str = logging.get_logger(__name__) UpperCamelCase__: str = {"vocab_file": "vocab.json"} UpperCamelCase__: Optional[int] = { "vocab_file": { "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json", } } UpperCamelCase__: List[str] = {"mgp-str": 27} class SCREAMING_SNAKE_CASE( A__ ): """simple docstring""" lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[str] , __snake_case : List[Any] , __snake_case : Optional[Any]="[GO]" , __snake_case : List[Any]="[GO]" , __snake_case : Union[str, Any]="[s]" , __snake_case : Optional[Any]="[GO]" , **__snake_case : List[Any] ) -> Union[str, Any]: super().__init__( unk_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , pad_token=__snake_case , **__snake_case , ) with open(__snake_case , encoding='''utf-8''' ) as vocab_handle: UpperCAmelCase : Union[str, Any] = json.load(__snake_case ) UpperCAmelCase : Optional[int] = {v: k for k, v in self.vocab.items()} @property def A ( self : Union[str, Any] ) -> List[str]: return len(self.vocab ) def A ( self : Dict ) -> List[Any]: return dict(self.vocab , **self.added_tokens_encoder ) def A ( self : int , __snake_case : Union[str, Any] ) -> Dict: UpperCAmelCase : int = [] for s in text: char_tokens.extend(__snake_case ) return char_tokens def A ( self : Optional[int] , __snake_case : List[str] ) -> List[Any]: return self.vocab.get(__snake_case , self.vocab.get(self.unk_token ) ) def A ( self : Optional[Any] , __snake_case : Optional[int] ) -> int: return self.decoder.get(__snake_case ) def A ( self : Union[str, Any] , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__snake_case ): logger.error('''Vocabulary path ({}) should be a directory'''.format(__snake_case ) ) return UpperCAmelCase : List[str] = os.path.join( __snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=__snake_case , ensure_ascii=__snake_case ) + '''\n''' ) return (vocab_file,)
code_codestyle: 23
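# Illustrative sketch of the character-level tokenization implemented above: each
# character becomes one token. The "alibaba-damo/mgp-str-base" checkpoint name is taken
# from the pretrained vocab map in the file and assumed to be reachable.
from transformers import MgpstrTokenizer

tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
print(tokenizer.tokenize("ticket"))    # ['t', 'i', 'c', 'k', 'e', 't']
print(tokenizer("ticket").input_ids)   # one id per character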
"""simple docstring""" def _A (__a = 50 ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f'''{solution() = }''')
style_context_codestyle: 91
label: 0
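# Worked check of the recurrence above on a short row: in a row of five squares, tiles
# of length 2 can be placed in 7 ways, length 3 in 3 ways and length 4 in 2 ways, so
# the combined count for length 5 is 12 (standalone re-statement of the same DP).
def colour_ways(length: int) -> int:
    ways = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways[row_length][tile_length - 2] += (
                    ways[row_length - tile_start - tile_length][tile_length - 2] + 1
                )
    return sum(ways[length])

assert colour_ways(5) == 12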
from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def lowerCamelCase__ ( ) -> tuple[list[int], int]: __snake_case = [randint(-1000 , 1000 ) for i in range(10 )] __snake_case = randint(-5000 , 5000 ) return (arr, r) snake_case_ = make_dataset() def lowerCamelCase__ ( snake_case_ : list[int] , snake_case_ : int ) -> tuple[int, ...]: for triplet in permutations(snake_case_ , 3 ): if sum(snake_case_ ) == target: return tuple(sorted(snake_case_ ) ) return (0, 0, 0) def lowerCamelCase__ ( snake_case_ : list[int] , snake_case_ : int ) -> tuple[int, int, int]: arr.sort() __snake_case = len(snake_case_ ) for i in range(n - 1 ): __snake_case , __snake_case = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def lowerCamelCase__ ( ) -> tuple[float, float]: __snake_case = ''' from __main__ import dataset, triplet_sum1, triplet_sum2 ''' __snake_case = ''' triplet_sum1(*dataset) ''' __snake_case = ''' triplet_sum2(*dataset) ''' __snake_case = repeat(setup=snake_case_ , stmt=snake_case_ , repeat=5 , number=1_0000 ) __snake_case = repeat(setup=snake_case_ , stmt=snake_case_ , repeat=5 , number=1_0000 ) return (min(snake_case_ ), min(snake_case_ )) if __name__ == "__main__": from doctest import testmod testmod() snake_case_ = solution_times() print(F'The time for naive implementation is {times[0]}.') print(F'The time for optimized implementation is {times[1]}.')
code_codestyle: 24
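# Standalone sketch of the sorted two-pointer idea used by the second implementation
# above: fix the smallest element, then move two pointers inward until the remaining
# pair reaches the target.
def three_sum(nums: list[int], target: int) -> tuple[int, int, int]:
    nums = sorted(nums)
    for i in range(len(nums) - 2):
        left, right = i + 1, len(nums) - 1
        while left < right:
            current = nums[i] + nums[left] + nums[right]
            if current == target:
                return (nums[i], nums[left], nums[right])
            if current < target:
                left += 1
            else:
                right -= 1
    return (0, 0, 0)

assert three_sum([12, 3, 1, 2, -6, 5, -8, 6], 0) == (-8, 2, 6)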
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = (PNDMScheduler,) __UpperCamelCase = (("num_inference_steps", 5_0),) def _SCREAMING_SNAKE_CASE ( self : Any , **lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = { '''num_train_timesteps''': 1000, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**lowercase_) return config def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[str]=0 , **lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('''num_inference_steps''' , lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_sample SCREAMING_SNAKE_CASE_ : List[Any] = 0.1 * sample SCREAMING_SNAKE_CASE_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config(**lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # copy over dummy past residuals SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class.from_pretrained(lowercase_) new_scheduler.set_timesteps(lowercase_) # copy over dummy past residuals SCREAMING_SNAKE_CASE_ : Optional[Any] = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str]=0 , **lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop('''num_inference_steps''' , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = 0.1 * sample SCREAMING_SNAKE_CASE_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # copy over dummy past residuals (must be after setting timesteps) SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_) SCREAMING_SNAKE_CASE_ : str = scheduler_class.from_pretrained(lowercase_) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_) # copy over dummy past residual (must be after setting 
timesteps) SCREAMING_SNAKE_CASE_ : Any = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Optional[Any] = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Tuple = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : str , **lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_scheduler_config(**lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class(**lowercase_) SCREAMING_SNAKE_CASE_ : Dict = 10 SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_model() SCREAMING_SNAKE_CASE_ : str = self.dummy_sample_deter scheduler.set_timesteps(lowercase_) for i, t in enumerate(scheduler.prk_timesteps): SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : str = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample for i, t in enumerate(scheduler.plms_timesteps): SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_).prev_sample return sample def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = dict(self.forward_default_kwargs) SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''num_inference_steps''' , lowercase_) for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler_class(**lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_sample SCREAMING_SNAKE_CASE_ : Any = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase_ , '''set_timesteps'''): scheduler.set_timesteps(lowercase_) elif num_inference_steps is not None and not hasattr(lowercase_ , '''set_timesteps'''): SCREAMING_SNAKE_CASE_ : Optional[Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) SCREAMING_SNAKE_CASE_ : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] SCREAMING_SNAKE_CASE_ : Optional[int] = dummy_past_residuals[:] SCREAMING_SNAKE_CASE_ : Dict = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : List[Any] = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_).prev_sample SCREAMING_SNAKE_CASE_ : Any = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' for steps_offset in [0, 1]: 
self.check_over_configs(steps_offset=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : List[str] = self.get_scheduler_config(steps_offset=1) SCREAMING_SNAKE_CASE_ : Tuple = scheduler_class(**lowercase_) scheduler.set_timesteps(10) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , ) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02]): self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' for t in [1, 5, 10]: self.check_over_forward(time_step=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]): self.check_over_forward(num_inference_steps=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = 27 for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_sample SCREAMING_SNAKE_CASE_ : str = 0.1 * sample SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler_class(**lowercase_) scheduler.set_timesteps(lowercase_) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2]): SCREAMING_SNAKE_CASE_ : int = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_).prev_sample def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' with self.assertRaises(lowercase_): SCREAMING_SNAKE_CASE_ : int = self.scheduler_classes[0] SCREAMING_SNAKE_CASE_ : List[str] = self.get_scheduler_config() SCREAMING_SNAKE_CASE_ : Dict = scheduler_class(**lowercase_) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.full_loop() SCREAMING_SNAKE_CASE_ : List[Any] = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 1_98.13_18) < 1e-2 assert abs(result_mean.item() - 0.25_80) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.full_loop(prediction_type='''v_prediction''') SCREAMING_SNAKE_CASE_ : str = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 67.39_86) < 1e-2 assert abs(result_mean.item() - 0.08_78) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01) SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 2_30.03_99) < 1e-2 assert abs(result_mean.item() - 0.29_95) < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' 
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01) SCREAMING_SNAKE_CASE_ : int = torch.sum(torch.abs(lowercase_)) SCREAMING_SNAKE_CASE_ : List[str] = torch.mean(torch.abs(lowercase_)) assert abs(result_sum.item() - 1_86.94_82) < 1e-2 assert abs(result_mean.item() - 0.24_34) < 1e-3
style_context_codestyle: 91
label: 0
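# Minimal denoising-loop sketch with the scheduler exercised by the tests above; the
# scaled sample stands in for a real noise-prediction model, so only the control flow
# is meaningful, not the numbers.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = 0.1 * sample  # placeholder for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])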
"""simple docstring""" from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class lowerCAmelCase_ (nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "geglu" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = "layer_norm" , SCREAMING_SNAKE_CASE__ = False , ) -> Optional[Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : List[Any] = only_cross_attention SCREAMING_SNAKE_CASE__ : List[Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero""" SCREAMING_SNAKE_CASE__ : Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm""" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: SCREAMING_SNAKE_CASE__ : int = AdaLayerNorm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) elif self.use_ada_layer_norm_zero: SCREAMING_SNAKE_CASE__ : Tuple = AdaLayerNormZero(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: SCREAMING_SNAKE_CASE__ : Optional[int] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = Attention( query_dim=SCREAMING_SNAKE_CASE__ , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=SCREAMING_SNAKE_CASE__ , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. SCREAMING_SNAKE_CASE__ : Any = ( AdaLayerNorm(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if self.use_ada_layer_norm else nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ ) ) SCREAMING_SNAKE_CASE__ : Tuple = Attention( query_dim=SCREAMING_SNAKE_CASE__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , upcast_attention=SCREAMING_SNAKE_CASE__ , ) # is self-attn if encoder_hidden_states is none else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : str = None # 3. 
Feed-forward SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = FeedForward(SCREAMING_SNAKE_CASE__ , dropout=SCREAMING_SNAKE_CASE__ , activation_fn=SCREAMING_SNAKE_CASE__ , final_dropout=SCREAMING_SNAKE_CASE__ ) # let chunk size default to None SCREAMING_SNAKE_CASE__ : Tuple = None SCREAMING_SNAKE_CASE__ : Dict = 0 def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = chunk_size SCREAMING_SNAKE_CASE__ : str = dim def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ) -> Any: """simple docstring""" if self.use_ada_layer_norm: SCREAMING_SNAKE_CASE__ : Any = self.norma(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) elif self.use_ada_layer_norm_zero: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.norma( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hidden_dtype=hidden_states.dtype ) else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.norma(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = cross_attention_kwargs if cross_attention_kwargs is not None else {} SCREAMING_SNAKE_CASE__ : List[Any] = self.attna( SCREAMING_SNAKE_CASE__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) if self.use_ada_layer_norm_zero: SCREAMING_SNAKE_CASE__ : List[str] = gate_msa.unsqueeze(1 ) * attn_output SCREAMING_SNAKE_CASE__ : Dict = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: SCREAMING_SNAKE_CASE__ : str = ( self.norma(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if self.use_ada_layer_norm else self.norma(SCREAMING_SNAKE_CASE__ ) ) SCREAMING_SNAKE_CASE__ : List[str] = self.attna( SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = attn_output + hidden_states # 3. Feed-forward SCREAMING_SNAKE_CASE__ : Any = self.norma(SCREAMING_SNAKE_CASE__ ) if self.use_ada_layer_norm_zero: SCREAMING_SNAKE_CASE__ : Optional[int] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. 
Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' ) SCREAMING_SNAKE_CASE__ : List[str] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size SCREAMING_SNAKE_CASE__ : int = torch.cat( [self.ff(SCREAMING_SNAKE_CASE__ ) for hid_slice in norm_hidden_states.chunk(SCREAMING_SNAKE_CASE__ , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: SCREAMING_SNAKE_CASE__ : int = self.ff(SCREAMING_SNAKE_CASE__ ) if self.use_ada_layer_norm_zero: SCREAMING_SNAKE_CASE__ : Tuple = gate_mlp.unsqueeze(1 ) * ff_output SCREAMING_SNAKE_CASE__ : Optional[Any] = ff_output + hidden_states return hidden_states class lowerCAmelCase_ (nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 4 , SCREAMING_SNAKE_CASE__ = 0.0 , SCREAMING_SNAKE_CASE__ = "geglu" , SCREAMING_SNAKE_CASE__ = False , ) -> Tuple: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : Optional[int] = int(dim * mult ) SCREAMING_SNAKE_CASE__ : Tuple = dim_out if dim_out is not None else dim if activation_fn == "gelu": SCREAMING_SNAKE_CASE__ : str = GELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if activation_fn == "gelu-approximate": SCREAMING_SNAKE_CASE__ : List[str] = GELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , approximate="""tanh""" ) elif activation_fn == "geglu": SCREAMING_SNAKE_CASE__ : Optional[Any] = GEGLU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) elif activation_fn == "geglu-approximate": SCREAMING_SNAKE_CASE__ : int = ApproximateGELU(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = nn.ModuleList([] ) # project in self.net.append(SCREAMING_SNAKE_CASE__ ) # project dropout self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE__ ) ) # project out self.net.append(nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. 
have a final dropout if final_dropout: self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE__ ) ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" for module in self.net: SCREAMING_SNAKE_CASE__ : Dict = module(SCREAMING_SNAKE_CASE__ ) return hidden_states class lowerCAmelCase_ (nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = "none" ) -> List[Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = approximate def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: """simple docstring""" if gate.device.type != "mps": return F.gelu(SCREAMING_SNAKE_CASE__ , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.proj(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = self.gelu(SCREAMING_SNAKE_CASE__ ) return hidden_states class lowerCAmelCase_ (nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : Dict = nn.Linear(SCREAMING_SNAKE_CASE__ , dim_out * 2 ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" if gate.device.type != "mps": return F.gelu(SCREAMING_SNAKE_CASE__ ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.proj(SCREAMING_SNAKE_CASE__ ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(SCREAMING_SNAKE_CASE__ ) class lowerCAmelCase_ (nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : str = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.proj(SCREAMING_SNAKE_CASE__ ) return x * torch.sigmoid(1.702 * x ) class lowerCAmelCase_ (nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : List[Any] = nn.Embedding(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = nn.SiLU() SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , embedding_dim * 2 ) SCREAMING_SNAKE_CASE__ : List[str] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE__ ) ) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = torch.chunk(SCREAMING_SNAKE_CASE__ , 2 ) SCREAMING_SNAKE_CASE__ : Dict = self.norm(SCREAMING_SNAKE_CASE__ ) * (1 + scale) + shift return x class lowerCAmelCase_ (nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : Any = CombinedTimestepLabelEmbeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.SiLU() SCREAMING_SNAKE_CASE__ : List[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , 6 * embedding_dim , bias=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE__ , elementwise_affine=SCREAMING_SNAKE_CASE__ , eps=1E-6 ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hidden_dtype=SCREAMING_SNAKE_CASE__ ) ) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = emb.chunk(6 , dim=1 ) SCREAMING_SNAKE_CASE__ : Tuple = self.norm(SCREAMING_SNAKE_CASE__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class lowerCAmelCase_ (nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 1E-5 ) -> int: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : List[Any] = num_groups SCREAMING_SNAKE_CASE__ : Any = eps if act_fn is None: SCREAMING_SNAKE_CASE__ : Any = None else: SCREAMING_SNAKE_CASE__ : Dict = get_activation(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , out_dim * 2 ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any: """simple docstring""" if self.act: SCREAMING_SNAKE_CASE__ : Tuple = self.act(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = self.linear(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = emb[:, :, None, None] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = emb.chunk(2 , dim=1 ) SCREAMING_SNAKE_CASE__ : Dict = F.group_norm(SCREAMING_SNAKE_CASE__ , self.num_groups , eps=self.eps ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = x * (1 + scale) + shift return x
code_codestyle: 25
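# Standalone sketch of the GEGLU gating used by the feed-forward block above: the input
# projection doubles the width, and one half gates the other through a GELU. The class
# name is mine; the chunk-and-gate logic mirrors the module defined above.
import torch
import torch.nn.functional as F
from torch import nn

class GEGLUSketch(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * F.gelu(gate)

x = torch.randn(2, 4, 32)
print(GEGLUSketch(32, 64)(x).shape)  # torch.Size([2, 4, 64])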
"""simple docstring""" import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @parameterized.expand([(None,), ('''foo.json''',)]) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ , config_name=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , lowercase_) self.assertEqual(loaded_config.temperature , 0.7) self.assertEqual(loaded_config.length_penalty , 1.0) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50) self.assertEqual(loaded_config.max_length , 20) self.assertEqual(loaded_config.max_time , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoConfig.from_pretrained('''gpt2''') SCREAMING_SNAKE_CASE_ : int = GenerationConfig.from_model_config(lowercase_) SCREAMING_SNAKE_CASE_ : int = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(lowercase_ , lowercase_) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = GenerationConfig() SCREAMING_SNAKE_CASE_ : Any = { '''max_new_tokens''': 1024, '''foo''': '''bar''', } SCREAMING_SNAKE_CASE_ : str = copy.deepcopy(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = generation_config.update(**lowercase_) # update_kwargs was not modified (no side effects) self.assertEqual(lowercase_ , lowercase_) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024) # `.update()` returns a dictionary of unused kwargs self.assertEqual(lowercase_ , {'''foo''': '''bar'''}) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = GenerationConfig() SCREAMING_SNAKE_CASE_ : List[str] = '''bar''' with tempfile.TemporaryDirectory('''test-generation-config''') as tmp_dir: generation_config.save_pretrained(lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = GenerationConfig.from_pretrained(lowercase_) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , '''bar''') SCREAMING_SNAKE_CASE_ : Optional[Any] = GenerationConfig.from_model_config(lowercase_) assert not hasattr(lowercase_ , '''foo''') # no new kwargs should be initialized if from config def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig() self.assertEqual(default_config.temperature , 1.0) 
self.assertEqual(default_config.do_sample , lowercase_) self.assertEqual(default_config.num_beams , 1) SCREAMING_SNAKE_CASE_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7) self.assertEqual(config.do_sample , lowercase_) self.assertEqual(config.num_beams , 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0) self.assertEqual(loaded_config.temperature , 1.0) self.assertEqual(loaded_config.do_sample , lowercase_) self.assertEqual(loaded_config.num_beams , 1) # default value @is_staging_test class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = TOKEN HfFolder.save_token(lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str]): '''simple docstring''' try: delete_repo(token=cls._token , repo_id='''test-generation-config''') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''') except HTTPError: pass def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''test-generation-config''' , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : int = GenerationConfig.from_pretrained(F'{USER}/test-generation-config') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) # Reset repo delete_repo(token=self._token , repo_id='''test-generation-config''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id='''test-generation-config''' , push_to_hub=lowercase_ , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : Optional[int] = GenerationConfig.from_pretrained(F'{USER}/test-generation-config') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : Any = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=lowercase_ , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : Any = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
style_context_codestyle: 91
label: 0
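# Round-trip sketch of the save/load behaviour the tests above exercise; only a local
# temporary directory is used, no Hub access.
import tempfile
from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    reloaded = GenerationConfig.from_pretrained(tmp_dir)
print(reloaded.temperature)  # 0.7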
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json", } class lowercase ( UpperCamelCase__ ): _a = "transfo-xl" _a = ["mems"] _a = { "n_token": "vocab_size", "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _a=26_7735 , _a=[2_0000, 4_0000, 20_0000] , _a=1024 , _a=1024 , _a=16 , _a=64 , _a=4096 , _a=4 , _a=False , _a=18 , _a=1600 , _a=1000 , _a=True , _a=True , _a=0 , _a=-1 , _a=True , _a=0.1 , _a=0.0 , _a=True , _a="normal" , _a=0.01 , _a=0.01 , _a=0.02 , _a=1e-5 , _a=0 , **_a , ) -> Tuple: _A : Union[str, Any] = vocab_size _A : Union[str, Any] = [] self.cutoffs.extend(_a ) if proj_share_all_but_first: _A : Union[str, Any] = [False] + [True] * len(self.cutoffs ) else: _A : Dict = [False] + [False] * len(self.cutoffs ) _A : List[str] = d_model _A : Dict = d_embed _A : Dict = d_head _A : Optional[int] = d_inner _A : Tuple = div_val _A : Union[str, Any] = pre_lnorm _A : Union[str, Any] = n_layer _A : str = n_head _A : Optional[Any] = mem_len _A : Tuple = same_length _A : Optional[int] = attn_type _A : Any = clamp_len _A : Dict = sample_softmax _A : Any = adaptive _A : List[str] = dropout _A : List[str] = dropatt _A : Union[str, Any] = untie_r _A : Optional[int] = init _A : Union[str, Any] = init_range _A : Tuple = proj_init_std _A : int = init_std _A : List[str] = layer_norm_epsilon super().__init__(eos_token_id=_a , **_a ) @property def a__ ( self ) -> int: # Message copied from Transformer-XL documentation logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def a__ ( self , _a ) -> Tuple: # Message copied from Transformer-XL documentation raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
code_codestyle: 26
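# Sketch instantiating the configuration above with smaller, illustrative values; note
# that max_position_embeddings is reported as -1 because Transformer-XL has no fixed
# sequence-length limit.
from transformers import TransfoXLConfig

config = TransfoXLConfig(vocab_size=10_000, cutoffs=[2_000, 4_000], d_model=256, n_layer=4, n_head=4)
print(config.cutoffs)                  # [2000, 4000]
print(config.max_position_embeddings)  # -1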
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets UpperCAmelCase_ : Optional[Any] = datasets.logging.get_logger(__name__) UpperCAmelCase_ : List[str] = """\ @InProceedings{moosavi2019minimum, author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube}, title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection}, year = {2019}, booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, publisher = {Association for Computational Linguistics}, address = {Florence, Italy}, } @inproceedings{10.3115/1072399.1072405, author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette}, title = {A Model-Theoretic Coreference Scoring Scheme}, year = {1995}, isbn = {1558604022}, publisher = {Association for Computational Linguistics}, address = {USA}, url = {https://doi.org/10.3115/1072399.1072405}, doi = {10.3115/1072399.1072405}, booktitle = {Proceedings of the 6th Conference on Message Understanding}, pages = {45–52}, numpages = {8}, location = {Columbia, Maryland}, series = {MUC6 ’95} } @INPROCEEDINGS{Bagga98algorithmsfor, author = {Amit Bagga and Breck Baldwin}, title = {Algorithms for Scoring Coreference Chains}, booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference}, year = {1998}, pages = {563--566} } @INPROCEEDINGS{Luo05oncoreference, author = {Xiaoqiang Luo}, title = {On coreference resolution performance metrics}, booktitle = {In Proc. of HLT/EMNLP}, year = {2005}, pages = {25--32}, publisher = {URL} } @inproceedings{moosavi-strube-2016-coreference, title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\", author = \"Moosavi, Nafise Sadat and Strube, Michael\", booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\", month = aug, year = \"2016\", address = \"Berlin, Germany\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/P16-1060\", doi = \"10.18653/v1/P16-1060\", pages = \"632--642\", } """ UpperCAmelCase_ : Tuple = """\ CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which implements of the common evaluation metrics including MUC [Vilain et al, 1995], B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005], LEA [Moosavi and Strube, 2016] and the averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) [Denis and Baldridge, 2009a; Pradhan et al., 2011]. This wrapper of CoVal currently only work with CoNLL line format: The CoNLL format has one word per line with all the annotation for this word in column separated by spaces: Column Type Description 1 Document ID This is a variation on the document filename 2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc. 3 Word number 4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release. 
5 Part-of-Speech 6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column. 7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\" 8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7. 9 Word sense This is the word sense of the word in Column 3. 10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data. 11 Named Entities These columns identifies the spans representing various named entities. 12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7. N Coreference Coreference chain information encoded in a parenthesis structure. More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md CoVal code was written by @ns-moosavi. Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py The test suite is taken from https://github.com/conll/reference-coreference-scorers/ Mention evaluation and the test suite are added by @andreasvc. Parsing CoNLL files is developed by Leo Born. """ UpperCAmelCase_ : Union[str, Any] = """ Calculates coreference evaluation metrics. Args: predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format. Each prediction is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format. Each reference is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. keep_singletons: After extracting all mentions of key or system files, mentions whose corresponding coreference chain is of size one, are considered as singletons. The default evaluation mode will include singletons in evaluations if they are included in the key or the system files. By setting 'keep_singletons=False', all singletons in the key and system files will be excluded from the evaluation. NP_only: Most of the recent coreference resolvers only resolve NP mentions and leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs. min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans. Minimum spans are determined using the MINA algorithm. 
Returns: 'mentions': mentions 'muc': MUC metric [Vilain et al, 1995] 'bcub': B-cubed [Bagga and Baldwin, 1998] 'ceafe': CEAFe [Luo et al., 2005] 'lea': LEA [Moosavi and Strube, 2016] 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) Examples: >>> coval = datasets.load_metric('coval') >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -', ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)', ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)', ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -', ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -', ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -'] >>> references = [words] >>> predictions = [words] >>> results = coval.compute(predictions=predictions, references=references) >>> print(results) # doctest:+ELLIPSIS {'mentions/recall': 1.0,[...] 'conll_score': 100.0} """ def _A (__a , __a , __a=False , __a=False , __a=True , __a=False , __a="dummy_doc" ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = {doc: key_lines} SCREAMING_SNAKE_CASE_ : List[str] = {doc: sys_lines} SCREAMING_SNAKE_CASE_ : Dict = {} SCREAMING_SNAKE_CASE_ : Dict = 0 SCREAMING_SNAKE_CASE_ : List[str] = 0 SCREAMING_SNAKE_CASE_ : Tuple = 0 SCREAMING_SNAKE_CASE_ : int = 0 SCREAMING_SNAKE_CASE_ : List[str] = 0 SCREAMING_SNAKE_CASE_ : Any = 0 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = reader.get_doc_mentions(__a , key_doc_lines[doc] , __a ) key_singletons_num += singletons_num if NP_only or min_span: SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.set_annotated_parse_trees(__a , key_doc_lines[doc] , __a , __a ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = reader.get_doc_mentions(__a , sys_doc_lines[doc] , __a ) sys_singletons_num += singletons_num if NP_only or min_span: SCREAMING_SNAKE_CASE_ : Union[str, Any] = reader.set_annotated_parse_trees(__a , key_doc_lines[doc] , __a , __a ) if remove_nested: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = reader.remove_nested_coref_mentions(__a , __a ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = reader.remove_nested_coref_mentions(__a , __a ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.get_mention_assignments(__a , __a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.get_mention_assignments(__a , __a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( '''Number of removed nested coreferring mentions in the key ''' f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' ) logger.info( '''Number of resulting singleton clusters in the key ''' f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' ) if not keep_singletons: logger.info( f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ' '''files, respectively''' ) return doc_coref_infos def _A (__a , __a , __a , __a , __a , __a , __a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = get_coref_infos(__a , __a , __a , __a , __a , __a ) SCREAMING_SNAKE_CASE_ : str = {} SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0 
SCREAMING_SNAKE_CASE_ : str = 0 for name, metric in metrics: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = evaluator.evaluate_documents(__a , __a , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': fa} ) logger.info( name.ljust(10 ) , f'Recall: {recall * 1_00:.2f}' , f' Precision: {precision * 1_00:.2f}' , f' F1: {fa * 1_00:.2f}' , ) if conll_subparts_num == 3: SCREAMING_SNAKE_CASE_ : Tuple = (conll / 3) * 1_00 logger.info(f'CoNLL score: {conll:.2f}' ) output_scores.update({'''conll_score''': conll} ) return output_scores def _A (__a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = False for line in key_lines: if not line.startswith('''#''' ): if len(line.split() ) > 6: SCREAMING_SNAKE_CASE_ : Any = line.split()[5] if not parse_col == "-": SCREAMING_SNAKE_CASE_ : Any = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''')), '''references''': datasets.Sequence(datasets.Value('''string''')), }) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[ '''https://github.com/ns-moosavi/coval''', '''https://www.aclweb.org/anthology/P16-1060''', '''http://www.conll.cemantix.org/2012/data.html''', ] , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Dict=True , lowercase_ : Optional[Any]=False , lowercase_ : Optional[Any]=False , lowercase_ : Dict=False): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = [ ('''mentions''', evaluator.mentions), ('''muc''', evaluator.muc), ('''bcub''', evaluator.b_cubed), ('''ceafe''', evaluator.ceafe), ('''lea''', evaluator.lea), ] if min_span: SCREAMING_SNAKE_CASE_ : Union[str, Any] = util.check_gold_parse_annotation(lowercase_) if not has_gold_parse: raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''') # util.parse_key_file(key_file) # key_file = key_file + ".parsed" SCREAMING_SNAKE_CASE_ : Optional[Any] = evaluate( key_lines=lowercase_ , sys_lines=lowercase_ , metrics=lowercase_ , NP_only=lowercase_ , remove_nested=lowercase_ , keep_singletons=lowercase_ , min_span=lowercase_ , ) return score
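The averaged CoNLL score computed in the evaluation loop above is simply the unweighted mean of the MUC, B-cubed and CEAFe F1 values, rescaled to 0-100. A minimal sketch of just that arithmetic (hypothetical helper name, not part of the metric script):

def conll_score_sketch(muc_f1, bcub_f1, ceafe_f1):
    # Mirrors the `conll += fa` / `(conll / 3) * 100` bookkeeping in the loop above.
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100

print(conll_score_sketch(1.0, 1.0, 1.0))  # 100.0, as in the doctest above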
91
0
'''simple docstring''' import math def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): __a : int = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 ) return exponent == int(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : float = 1 / 12_345 ): __a : List[str] = 0 __a : Any = 0 __a : int = 3 while True: __a : Tuple = (integer**2 - 1) / 4 # if candidate is an integer, then there is a partition for k if partition_candidate == int(_SCREAMING_SNAKE_CASE ): __a : str = int(_SCREAMING_SNAKE_CASE ) total_partitions += 1 if check_partition_perfect(_SCREAMING_SNAKE_CASE ): perfect_partitions += 1 if perfect_partitions > 0: if perfect_partitions / total_partitions < max_proportion: return int(_SCREAMING_SNAKE_CASE ) integer += 1 if __name__ == "__main__": print(f'''{solution() = }''')
27
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : Tuple = """▁""" UpperCAmelCase_ : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""} UpperCAmelCase_ : str = { """vocab_file""": { """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""", } } UpperCAmelCase_ : str = { """facebook/xglm-564M""": 2048, } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ["input_ids", "attention_mask"] def __init__( self : List[Any] , lowercase_ : str , lowercase_ : Tuple="<s>" , lowercase_ : Any="</s>" , lowercase_ : Optional[int]="</s>" , lowercase_ : List[Any]="<s>" , lowercase_ : Union[str, Any]="<unk>" , lowercase_ : Union[str, Any]="<pad>" , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : Tuple , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer SCREAMING_SNAKE_CASE_ : List[str] = 7 SCREAMING_SNAKE_CASE_ : Tuple = [F'<madeupword{i}>' for i in range(self.num_madeup_words)] SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.get('''additional_special_tokens''' , []) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(lowercase_)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1 # Mimic fairseq token-to-id alignment for the first 4 token SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} SCREAMING_SNAKE_CASE_ : List[Any] = len(self.sp_model) SCREAMING_SNAKE_CASE_ : Optional[Any] = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)} self.fairseq_tokens_to_ids.update(lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.__dict__.copy() SCREAMING_SNAKE_CASE_ : str = None SCREAMING_SNAKE_CASE_ : Optional[int] = self.sp_model.serialized_model_proto() return state def __setstate__( self : Tuple , lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs'''): SCREAMING_SNAKE_CASE_ : Union[str, Any] = {} SCREAMING_SNAKE_CASE_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None): '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_) if token_ids_a is None: return [1] + ([0] * len(lowercase_)) return [1] + ([0] * len(lowercase_)) + [1, 1] + ([0] * len(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a) * [0] @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : str): '''simple docstring''' return self.sp_model.encode(lowercase_ , out_type=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Union[str, Any]): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE_ : Optional[Any] = self.sp_model.PieceToId(lowercase_) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[Any]): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def 
_SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = ''''''.join(lowercase_).replace(lowercase_ , ''' ''').strip() return out_string def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None): '''simple docstring''' if not os.path.isdir(lowercase_): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join( lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , lowercase_) elif not os.path.isfile(self.vocab_file): with open(lowercase_ , '''wb''') as fi: SCREAMING_SNAKE_CASE_ : int = self.sp_model.serialized_model_proto() fi.write(lowercase_) return (out_vocab_file,)
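The token-to-id path of the XGLM tokenizer above can be summarised on its own: the four fairseq specials get fixed ids 0-3, every other SentencePiece piece id is shifted up by the offset of 1, and a piece id of 0 falls back to the unknown token. A standalone sketch of that rule (illustrative function, not the tokenizer's API):

FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1

def token_to_id_sketch(token, piece_to_id, unk_token_id=3):
    # Fixed ids for the fairseq specials, as in the alignment table above.
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = piece_to_id(token)  # the spm model returns 0 for unknown pieces
    return spm_id + FAIRSEQ_OFFSET if spm_id else unk_token_id

print(token_to_id_sketch("<pad>", lambda t: 0))  # 1
print(token_to_id_sketch(",", lambda t: 3))      # 4, matching the alignment table above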
91
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) _lowerCamelCase : Tuple = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Any = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys _lowerCamelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Dict = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = [ 
'''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] SCREAMING_SNAKE_CASE_ : List[Any] = '''fp16''' self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] SCREAMING_SNAKE_CASE_ : List[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : str = '''fp16''' self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_))
91
0
def lowercase__ ( __snake_case : int , __snake_case : int ): '''simple docstring''' while b: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = b, a % b return a def lowercase__ ( __snake_case : int , __snake_case : int ): '''simple docstring''' return a if b == 0 else euclidean_gcd_recursive(__snake_case , a % b ) def lowercase__ ( ): '''simple docstring''' print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" ) print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" ) print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" ) print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" ) print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" ) print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" ) print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" ) print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" ) print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" ) print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" ) if __name__ == "__main__": main()
29
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable UpperCAmelCase_ : str = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Dict = ["""GPTNeoXTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : List[str] = [ """GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoXForCausalLM""", """GPTNeoXForQuestionAnswering""", """GPTNeoXForSequenceClassification""", """GPTNeoXForTokenClassification""", """GPTNeoXLayer""", """GPTNeoXModel""", """GPTNeoXPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
91
0
def a ( snake_case__: list ): '''simple docstring''' if len(snake_case__ ) <= 1: return [tuple(snake_case__ )] lowercase_ = [] def generate(snake_case__: int , snake_case__: list ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , snake_case__ ) for i in range(k - 1 ): if k % 2 == 0: # k is even lowercase_ , lowercase_ = arr[k - 1], arr[i] else: # k is odd lowercase_ , lowercase_ = arr[k - 1], arr[0] generate(k - 1 , snake_case__ ) generate(len(snake_case__ ) , snake_case__ ) return res if __name__ == "__main__": __a = input('Enter numbers separated by a comma:\n').strip() __a = [int(item) for item in user_input.split(',')] print(heaps(arr))
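The snippet above is Heap's algorithm, which emits every permutation of the input exactly once (n! tuples in total). A readable, self-contained restatement of the same recursion for reference (the descriptive names are assumed, not taken from the snippet):

from itertools import permutations

def heaps_sketch(arr):
    # Same structure as above: recurse once, then swap-and-recurse k-1 more times.
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k, arr):
        if k == 1:
            res.append(tuple(arr))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k even: swap the i-th element with the last
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:           # k odd: swap the first element with the last
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res

assert sorted(heaps_sketch([1, 2, 3])) == sorted(permutations([1, 2, 3]))  # 3! = 6 tuples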
30
"""simple docstring""" import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py UpperCAmelCase_ : Optional[int] = """src/transformers""" UpperCAmelCase_ : Tuple = """docs/source/en""" UpperCAmelCase_ : Optional[Any] = """.""" def _A (__a , __a , __a ) -> Dict: """simple docstring""" with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Dict = f.readlines() # Find the start prompt. SCREAMING_SNAKE_CASE_ : List[Any] = 0 while not lines[start_index].startswith(__a ): start_index += 1 start_index += 1 SCREAMING_SNAKE_CASE_ : Tuple = start_index while not lines[end_index].startswith(__a ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | UpperCAmelCase_ : Optional[Any] = """Model|Encoder|Decoder|ForConditionalGeneration""" # Regexes that match TF/Flax/PT model names. UpperCAmelCase_ : int = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") UpperCAmelCase_ : Dict = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. UpperCAmelCase_ : int = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # This is to make sure the transformers module imported is the one in the repo. UpperCAmelCase_ : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH) def _A (__a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , __a ) return [m.group(0 ) for m in matches] def _A (__a , __a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = 2 if text == '''✅''' or text == '''❌''' else len(__a ) SCREAMING_SNAKE_CASE_ : Tuple = (width - text_length) // 2 SCREAMING_SNAKE_CASE_ : Tuple = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def _A () -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES SCREAMING_SNAKE_CASE_ : Tuple = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } SCREAMING_SNAKE_CASE_ : List[Any] = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. SCREAMING_SNAKE_CASE_ : List[str] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : int = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : List[str] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : int = collections.defaultdict(__a ) # Let's lookup through all transformers object (once). 
for attr_name in dir(__a ): SCREAMING_SNAKE_CASE_ : Any = None if attr_name.endswith('''Tokenizer''' ): SCREAMING_SNAKE_CASE_ : Dict = slow_tokenizers SCREAMING_SNAKE_CASE_ : Dict = attr_name[:-9] elif attr_name.endswith('''TokenizerFast''' ): SCREAMING_SNAKE_CASE_ : Optional[Any] = fast_tokenizers SCREAMING_SNAKE_CASE_ : Optional[Any] = attr_name[:-13] elif _re_tf_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : int = tf_models SCREAMING_SNAKE_CASE_ : Dict = _re_tf_models.match(__a ).groups()[0] elif _re_flax_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : Any = flax_models SCREAMING_SNAKE_CASE_ : Tuple = _re_flax_models.match(__a ).groups()[0] elif _re_pt_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : str = pt_models SCREAMING_SNAKE_CASE_ : int = _re_pt_models.match(__a ).groups()[0] if lookup_dict is not None: while len(__a ) > 0: if attr_name in model_name_to_prefix.values(): SCREAMING_SNAKE_CASE_ : List[str] = True break # Try again after removing the last word in the name SCREAMING_SNAKE_CASE_ : Optional[int] = ''''''.join(camel_case_split(__a )[:-1] ) # Let's build that table! SCREAMING_SNAKE_CASE_ : Any = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) SCREAMING_SNAKE_CASE_ : Any = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support'''] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). SCREAMING_SNAKE_CASE_ : List[str] = [len(__a ) + 2 for c in columns] SCREAMING_SNAKE_CASE_ : str = max([len(__a ) for name in model_names] ) + 2 # Build the table per se SCREAMING_SNAKE_CASE_ : List[Any] = '''|''' + '''|'''.join([_center_text(__a , __a ) for c, w in zip(__a , __a )] ) + '''|\n''' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n" SCREAMING_SNAKE_CASE_ : Union[str, Any] = {True: '''✅''', False: '''❌'''} for name in model_names: SCREAMING_SNAKE_CASE_ : str = model_name_to_prefix[name] SCREAMING_SNAKE_CASE_ : int = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(__a , __a ) for l, w in zip(__a , __a )] ) + "|\n" return table def _A (__a=False ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = _find_text_in_file( filename=os.path.join(__a , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , ) SCREAMING_SNAKE_CASE_ : Tuple = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(__a , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' ) if __name__ == "__main__": UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCAmelCase_ : Any = parser.parse_args() check_model_table(args.fix_and_overwrite)
91
0
'''simple docstring''' import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class lowerCamelCase_ (snake_case__ ): '''simple docstring''' __UpperCamelCase: Tuple = "M-CLIP" def __init__( self : Optional[Any] , A : List[Any]=1024 , A : Any=768 , **A : Tuple ): _UpperCAmelCase : str = transformerDimSize _UpperCAmelCase : Optional[int] = imageDimSize super().__init__(**A ) class lowerCamelCase_ (snake_case__ ): '''simple docstring''' __UpperCamelCase: Dict = MCLIPConfig def __init__( self : Optional[Any] , A : Any , *A : Any , **A : Optional[int] ): super().__init__(A , *A , **A ) _UpperCAmelCase : Union[str, Any] = XLMRobertaModel(A ) _UpperCAmelCase : Optional[int] = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims ) def _A ( self : Union[str, Any] , A : int , A : Any ): _UpperCAmelCase : Tuple = self.transformer(input_ids=A , attention_mask=A )[0] _UpperCAmelCase : List[str] = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None] return self.LinearTransformation(A ), embs
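The forward pass of the multilingual CLIP text encoder above mean-pools the transformer states over the attention mask before applying the linear projection. The pooling step in isolation (illustrative shapes; a sketch, not the model's code):

import torch

def masked_mean_pool(embs, attention_mask):
    # (batch, seq, dim) states averaged over the non-padding positions only,
    # the same `(embs * mask).sum(1) / mask.sum(1)` expression used above.
    summed = (embs * attention_mask.unsqueeze(2)).sum(dim=1)
    counts = attention_mask.sum(dim=1)[:, None]
    return summed / counts

pooled = masked_mean_pool(torch.ones(2, 4, 8), torch.tensor([[1, 1, 0, 0], [1, 1, 1, 1]]))
print(pooled.shape)  # torch.Size([2, 8])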
31
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , lowercase_ : List[Any] , lowercase_ : List[str]=13 , lowercase_ : int=7 , lowercase_ : Any=True , lowercase_ : str=True , lowercase_ : List[Any]=True , lowercase_ : List[Any]=True , lowercase_ : Dict=99 , lowercase_ : Union[str, Any]=24 , lowercase_ : int=2 , lowercase_ : List[str]=6 , lowercase_ : Any=37 , lowercase_ : Dict="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Dict=0.1 , lowercase_ : Union[str, Any]=512 , lowercase_ : List[str]=16 , lowercase_ : Any=2 , lowercase_ : Any=0.02 , lowercase_ : List[Any]=3 , lowercase_ : Optional[int]=None , lowercase_ : str=1000 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = parent SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_size SCREAMING_SNAKE_CASE_ : Optional[Any] = seq_length SCREAMING_SNAKE_CASE_ : List[Any] = is_training SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask SCREAMING_SNAKE_CASE_ : Optional[Any] = use_token_type_ids SCREAMING_SNAKE_CASE_ : int = use_labels SCREAMING_SNAKE_CASE_ : List[Any] = vocab_size SCREAMING_SNAKE_CASE_ : List[str] = hidden_size SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE_ : List[str] = num_attention_heads SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size SCREAMING_SNAKE_CASE_ : Tuple = hidden_act SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE_ : Union[str, Any] = type_vocab_size SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size SCREAMING_SNAKE_CASE_ : Any = initializer_range SCREAMING_SNAKE_CASE_ : Optional[Any] = num_labels SCREAMING_SNAKE_CASE_ : Tuple = scope SCREAMING_SNAKE_CASE_ : Optional[int] = range_bbox def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 3] SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 1] SCREAMING_SNAKE_CASE_ : str = t if bbox[i, j, 2] < bbox[i, j, 0]: SCREAMING_SNAKE_CASE_ : List[str] = bbox[i, j, 2] SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 0] SCREAMING_SNAKE_CASE_ : List[str] = t SCREAMING_SNAKE_CASE_ : Tuple = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) SCREAMING_SNAKE_CASE_ : Union[str, Any] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) SCREAMING_SNAKE_CASE_ : List[str] = None 
SCREAMING_SNAKE_CASE_ : List[str] = None if self.use_labels: SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE_ : Any = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = LiltModel(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , bbox=lowercase_ , token_type_ids=lowercase_) SCREAMING_SNAKE_CASE_ : int = model(lowercase_ , bbox=lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE_ : Optional[Any] = LiltForTokenClassification(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Tuple = model( lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : str , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = LiltForQuestionAnswering(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Optional[int] = model( lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) : List[str] = config_and_inputs 
SCREAMING_SNAKE_CASE_ : str = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __UpperCamelCase = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : str): '''simple docstring''' return True def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = LiltModelTester(self) SCREAMING_SNAKE_CASE_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE_ : Dict = type self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase_) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Optional[int] = LiltModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) @require_torch @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''').to(lowercase_) SCREAMING_SNAKE_CASE_ : str = torch.tensor([[1, 2]] , device=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase_) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Dict = model(input_ids=lowercase_ , bbox=lowercase_) SCREAMING_SNAKE_CASE_ : str = torch.Size([1, 2, 768]) SCREAMING_SNAKE_CASE_ : Dict = torch.tensor( [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=lowercase_ , ) self.assertTrue(outputs.last_hidden_state.shape , lowercase_) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase_ , atol=1e-3))
91
0
from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split UpperCAmelCase_ : Optional[int] = datasets.load_iris() UpperCAmelCase_ : int = np.array(data['data']) UpperCAmelCase_ : Optional[int] = np.array(data['target']) UpperCAmelCase_ : Tuple = data['target_names'] UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = train_test_split(X, y) def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : str ) -> str: """simple docstring""" return np.linalg.norm(np.array(__A ) - np.array(__A ) ) def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] , __A : Optional[Any] , __A : Union[str, Any] , __A : Tuple , __A : Any=5 ) -> str: """simple docstring""" a_ : str = zip(__A , __A ) # List of distances of all points from the point to be classified a_ : Tuple = [] for data_point in data: a_ : int = euclidean_distance(data_point[0] , __A ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. a_ : Tuple = [i[1] for i in sorted(__A )[:k]] # Most commonly occurring class among them # is the class into which the point is classified a_ : Tuple = Counter(__A ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
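The classifier above sorts all training points by Euclidean distance, keeps the labels of the k nearest, and resolves the prediction by majority vote with collections.Counter. The voting idiom on its own, with made-up labels:

from collections import Counter

nearest_labels = ["setosa", "versicolor", "setosa", "setosa", "virginica"]  # k = 5 neighbours
print(Counter(nearest_labels).most_common(1)[0][0])  # 'setosa'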
32
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) UpperCAmelCase_ : Dict = logging.getLogger(__name__) if __name__ == "__main__": UpperCAmelCase_ : List[str] = argparse.ArgumentParser( description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)""" ) parser.add_argument( """--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset.""" ) parser.add_argument( """--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file.""" ) parser.add_argument("""--vocab_size""", default=30522, type=int) UpperCAmelCase_ : Optional[Any] = parser.parse_args() logger.info(f'''Loading data from {args.data_file}''') with open(args.data_file, """rb""") as fp: UpperCAmelCase_ : Union[str, Any] = pickle.load(fp) logger.info("""Counting occurrences for MLM.""") UpperCAmelCase_ : Any = Counter() for tk_ids in data: counter.update(tk_ids) UpperCAmelCase_ : List[Any] = [0] * args.vocab_size for k, v in counter.items(): UpperCAmelCase_ : Dict = v logger.info(f'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, """wb""") as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
91
0
"""simple docstring""" def lowercase ( __snake_case : int , __snake_case : int ): while b: lowercase_ , lowercase_ : str = b, a % b return a def lowercase ( __snake_case : int , __snake_case : int ): return a if b == 0 else euclidean_gcd_recursive(__snake_case , a % b ) def lowercase ( ): print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' ) print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' ) print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' ) print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' ) print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' ) print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' ) print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' ) print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' ) print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' ) print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' ) if __name__ == "__main__": main()
33
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) def _A (__a , __a ) -> Tuple: """simple docstring""" try: with open(__a , '''rb''' ) as flax_state_f: SCREAMING_SNAKE_CASE_ : Optional[int] = from_bytes(__a , flax_state_f.read() ) except UnpicklingError as e: try: with open(__a ) as f: if f.read().startswith('''version''' ): raise OSError( '''You seem to have cloned a repository without having git-lfs installed. Please''' ''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the''' ''' folder you cloned.''' ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(__a , __a ) def _A (__a , __a ) -> Tuple: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights SCREAMING_SNAKE_CASE_ : Optional[int] = flatten_dict(jax.tree_util.tree_map(lambda __a : x.dtype == jnp.bfloataa , __a ) ).values() if any(__a ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = jax.tree_util.tree_map( lambda __a : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __a ) SCREAMING_SNAKE_CASE_ : int = '''''' SCREAMING_SNAKE_CASE_ : str = flatten_dict(__a , sep='''.''' ) SCREAMING_SNAKE_CASE_ : List[Any] = pt_model.state_dict() # keep track of unexpected & missing keys SCREAMING_SNAKE_CASE_ : str = [] SCREAMING_SNAKE_CASE_ : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): SCREAMING_SNAKE_CASE_ : Any = flax_key_tuple.split('''.''' ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: SCREAMING_SNAKE_CASE_ : Any = flax_key_tuple_array[:-1] + ['''weight'''] SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.transpose(__a , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": SCREAMING_SNAKE_CASE_ : Tuple = flax_key_tuple_array[:-1] + ['''weight'''] SCREAMING_SNAKE_CASE_ : Optional[int] = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": SCREAMING_SNAKE_CASE_ : Optional[int] = flax_key_tuple_array[:-1] + ['''weight'''] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(__a ): SCREAMING_SNAKE_CASE_ : List[str] = ( flax_key_tuple_string.replace('''_0''' , '''.0''' ) .replace('''_1''' , '''.1''' ) .replace('''_2''' , '''.2''' ) .replace('''_3''' , '''.3''' ) .replace('''_4''' , '''.4''' ) .replace('''_5''' , '''.5''' ) .replace('''_6''' , '''.6''' ) .replace('''_7''' , '''.7''' ) .replace('''_8''' , '''.8''' ) .replace('''_9''' , '''.9''' ) ) SCREAMING_SNAKE_CASE_ : Optional[Any] = '''.'''.join(__a ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f'Flax checkpoint seems to be 
incorrect. Weight {flax_key_tuple} was expected ' f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict SCREAMING_SNAKE_CASE_ : Optional[int] = np.asarray(__a ) if not isinstance(__a , np.ndarray ) else flax_tensor SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.from_numpy(__a ) # remove from missing keys missing_keys.remove(__a ) else: # weight is not expected by PyTorch model unexpected_keys.append(__a ) pt_model.load_state_dict(__a ) # re-transform missing_keys to list SCREAMING_SNAKE_CASE_ : int = list(__a ) if len(__a ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) if len(__a ) > 0: logger.warning( f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) return pt_model
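The conversion helper above renames Flax kernel/scale parameters to PyTorch weight tensors and transposes kernels so the channel ordering matches. A quick shape check of the two transpositions it applies (illustrative arrays only, assuming the usual Flax layouts):

import numpy as np

# 4D conv kernels: Flax stores (H, W, in_channels, out_channels); PyTorch expects
# (out_channels, in_channels, H, W), hence the (3, 2, 0, 1) permutation above.
print(np.transpose(np.zeros((3, 3, 16, 32)), (3, 2, 0, 1)).shape)  # (32, 16, 3, 3)

# 2D dense kernels: Flax stores (in_features, out_features); PyTorch linear
# weights are (out_features, in_features), hence the plain transpose above.
print(np.zeros((768, 3072)).T.shape)  # (3072, 768)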
91
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A =logging.get_logger(__name__) A ={ 'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json', # See all Nat models at https://huggingface.co/models?filter=nat } class _a ( __a , __a ): __a : List[str] = """nat""" __a : Optional[int] = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : Union[str, Any] , lowercase : Optional[int]=4 , lowercase : List[str]=3 , lowercase : int=64 , lowercase : Union[str, Any]=[3, 4, 6, 5] , lowercase : Optional[int]=[2, 4, 8, 16] , lowercase : Any=7 , lowercase : List[Any]=3.0 , lowercase : str=True , lowercase : Tuple=0.0 , lowercase : Any=0.0 , lowercase : Dict=0.1 , lowercase : str="gelu" , lowercase : List[str]=0.02 , lowercase : List[Any]=1E-5 , lowercase : Optional[int]=0.0 , lowercase : Optional[int]=None , lowercase : int=None , **lowercase : Optional[Any] , ): '''simple docstring''' super().__init__(**lowercase ) UpperCAmelCase = patch_size UpperCAmelCase = num_channels UpperCAmelCase = embed_dim UpperCAmelCase = depths UpperCAmelCase = len(lowercase ) UpperCAmelCase = num_heads UpperCAmelCase = kernel_size UpperCAmelCase = mlp_ratio UpperCAmelCase = qkv_bias UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = drop_path_rate UpperCAmelCase = hidden_act UpperCAmelCase = layer_norm_eps UpperCAmelCase = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCAmelCase = int(embed_dim * 2 ** (len(lowercase ) - 1) ) UpperCAmelCase = layer_scale_init_value UpperCAmelCase = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowercase ) + 1 )] UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices( out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names )
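In the Nat configuration above, the hidden size exposed to downstream models follows directly from the defaults: the embedding dimension doubles at each later stage, so with embed_dim=64 and four stages it ends up at 512. A one-line check using the default values shown above:

embed_dim, depths = 64, [3, 4, 6, 5]  # defaults from the config above
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 512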
34
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase_ : Any = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "openai-gpt" __UpperCamelCase = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : List[str] , lowercase_ : List[str]=40478 , lowercase_ : List[str]=512 , lowercase_ : Optional[Any]=768 , lowercase_ : Tuple=12 , lowercase_ : Tuple=12 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : List[Any]=1e-5 , lowercase_ : int=0.02 , lowercase_ : Optional[int]="cls_index" , lowercase_ : Any=True , lowercase_ : List[Any]=None , lowercase_ : List[str]=True , lowercase_ : Optional[Any]=0.1 , **lowercase_ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = vocab_size SCREAMING_SNAKE_CASE_ : Tuple = n_positions SCREAMING_SNAKE_CASE_ : Optional[int] = n_embd SCREAMING_SNAKE_CASE_ : Dict = n_layer SCREAMING_SNAKE_CASE_ : Any = n_head SCREAMING_SNAKE_CASE_ : Union[str, Any] = afn SCREAMING_SNAKE_CASE_ : int = resid_pdrop SCREAMING_SNAKE_CASE_ : List[str] = embd_pdrop SCREAMING_SNAKE_CASE_ : Union[str, Any] = attn_pdrop SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_epsilon SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range SCREAMING_SNAKE_CASE_ : List[str] = summary_type SCREAMING_SNAKE_CASE_ : Tuple = summary_use_proj SCREAMING_SNAKE_CASE_ : Union[str, Any] = summary_activation SCREAMING_SNAKE_CASE_ : Any = summary_first_dropout SCREAMING_SNAKE_CASE_ : List[str] = summary_proj_to_labels super().__init__(**lowercase_)
91
0
'''simple docstring''' from __future__ import annotations from typing import Any def __snake_case( _lowerCAmelCase ) -> int: if not postfix_notation: return 0 snake_case__ : Tuple = {"""+""", """-""", """*""", """/"""} snake_case__ : list[Any] = [] for token in postfix_notation: if token in operations: snake_case__ , snake_case__ : Optional[int] = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(_lowerCAmelCase ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
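As a worked example for the postfix evaluator above: the infix expression (2 + 1) * 3 is written ['2', '1', '+', '3', '*'] in postfix. A minimal sketch limited to the two commutative operators (the snippet's operand naming is obfuscated, so subtraction and division order are not reproduced here):

def eval_postfix_sketch(tokens):
    # Mirrors the stack discipline above: operands are pushed, operators pop two values.
    stack = []
    for token in tokens:
        if token == "+":
            stack.append(stack.pop() + stack.pop())
        elif token == "*":
            stack.append(stack.pop() * stack.pop())
        else:
            stack.append(int(token))
    return stack.pop()

print(eval_postfix_sketch(["2", "1", "+", "3", "*"]))  # (2 + 1) * 3 = 9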
35
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[str] , *lowercase_ : Dict , **lowercase_ : Union[str, Any]): '''simple docstring''' warnings.warn( '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DeiTImageProcessor instead.''' , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_)
91
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json", # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class UpperCAmelCase_ ( a): lowerCamelCase__ = 'wav2vec2' def __init__( self, __a=32, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=0.1, __a=0.0, __a=0.0, __a=0.1, __a=0.1, __a=0.02, __a=1E-5, __a="group", __a="gelu", __a=(512, 512, 512, 512, 512, 512, 512), __a=(5, 2, 2, 2, 2, 2, 2), __a=(10, 3, 3, 3, 3, 2, 2), __a=False, __a=128, __a=16, __a=False, __a=True, __a=0.05, __a=10, __a=2, __a=0.0, __a=10, __a=0, __a=320, __a=2, __a=0.1, __a=100, __a=256, __a=256, __a=0.1, __a="sum", __a=False, __a=False, __a=256, __a=(512, 512, 512, 512, 1500), __a=(5, 3, 3, 1, 1), __a=(1, 2, 3, 1, 1), __a=512, __a=0, __a=1, __a=2, __a=False, __a=3, __a=2, __a=3, __a=None, __a=None, **__a, ): '''simple docstring''' super().__init__(**__a, pad_token_id=__a, bos_token_id=__a, eos_token_id=__a) _lowerCAmelCase : str = hidden_size _lowerCAmelCase : Optional[int] = feat_extract_norm _lowerCAmelCase : Union[str, Any] = feat_extract_activation _lowerCAmelCase : Optional[Any] = list(__a) _lowerCAmelCase : List[str] = list(__a) _lowerCAmelCase : str = list(__a) _lowerCAmelCase : List[str] = conv_bias _lowerCAmelCase : str = num_conv_pos_embeddings _lowerCAmelCase : List[Any] = num_conv_pos_embedding_groups _lowerCAmelCase : str = len(self.conv_dim) _lowerCAmelCase : List[str] = num_hidden_layers _lowerCAmelCase : str = intermediate_size _lowerCAmelCase : Any = hidden_act _lowerCAmelCase : int = num_attention_heads _lowerCAmelCase : Optional[Any] = hidden_dropout _lowerCAmelCase : List[str] = attention_dropout _lowerCAmelCase : Tuple = activation_dropout _lowerCAmelCase : int = feat_proj_dropout _lowerCAmelCase : List[str] = final_dropout _lowerCAmelCase : int = layerdrop _lowerCAmelCase : int = layer_norm_eps _lowerCAmelCase : Union[str, Any] = initializer_range _lowerCAmelCase : str = vocab_size _lowerCAmelCase : Optional[Any] = do_stable_layer_norm _lowerCAmelCase : Any = use_weighted_layer_sum if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`," f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCAmelCase : str = apply_spec_augment _lowerCAmelCase : Optional[Any] = mask_time_prob _lowerCAmelCase : Optional[int] = mask_time_length _lowerCAmelCase : List[str] = mask_time_min_masks _lowerCAmelCase : Optional[int] = mask_feature_prob _lowerCAmelCase : Optional[int] = mask_feature_length _lowerCAmelCase : List[str] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCAmelCase : Union[str, Any] = num_codevectors_per_group _lowerCAmelCase : str = num_codevector_groups _lowerCAmelCase : Optional[int] = contrastive_logits_temperature _lowerCAmelCase : Optional[int] = feat_quantizer_dropout _lowerCAmelCase : Optional[int] = num_negatives _lowerCAmelCase : Union[str, Any] = codevector_dim _lowerCAmelCase : Any = proj_codevector_dim _lowerCAmelCase : Optional[int] = diversity_loss_weight # ctc loss _lowerCAmelCase : Tuple = ctc_loss_reduction _lowerCAmelCase : Tuple = ctc_zero_infinity # adapter _lowerCAmelCase : List[Any] = add_adapter _lowerCAmelCase : List[str] = adapter_kernel_size _lowerCAmelCase : str = adapter_stride _lowerCAmelCase : List[str] = num_adapter_layers _lowerCAmelCase : str = output_hidden_size or hidden_size _lowerCAmelCase : Tuple = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. _lowerCAmelCase : str = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowerCAmelCase : str = list(__a) _lowerCAmelCase : Union[str, Any] = list(__a) _lowerCAmelCase : List[str] = list(__a) _lowerCAmelCase : Tuple = xvector_output_dim @property def snake_case__ ( self): '''simple docstring''' return functools.reduce(operator.mul, self.conv_stride, 1)
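The property at the end of the config reduces the convolutional strides to a single downsampling factor, i.e. the number of raw audio samples consumed per encoder frame. A minimal sketch of what it computes with the default strides listed above (the 20 ms interpretation assumes 16 kHz input):

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # defaults from the config above
samples_per_frame = functools.reduce(operator.mul, conv_stride, 1)
print(samples_per_frame)           # 320
print(samples_per_frame / 16_000)  # 0.02 s, i.e. one encoder frame every 20 ms at 16 kHz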
36
"""simple docstring""" import random from typing import Any def _A (__a ) -> list[Any]: """simple docstring""" for _ in range(len(__a ) ): SCREAMING_SNAKE_CASE_ : Optional[int] = random.randint(0 , len(__a ) - 1 ) SCREAMING_SNAKE_CASE_ : Tuple = random.randint(0 , len(__a ) - 1 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = data[b], data[a] return data if __name__ == "__main__": UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5, 6, 7] UpperCAmelCase_ : Dict = ["""python""", """says""", """hello""", """!"""] print("""Fisher-Yates Shuffle:""") print("""List""", integers, strings) print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
91
0
'''simple docstring''' from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor _lowerCAmelCase = transforms.Compose( [ transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if isinstance(UpperCamelCase , torch.Tensor ): return image elif isinstance(UpperCamelCase , PIL.Image.Image ): lowerCAmelCase__ : Union[str, Any] = [image] lowerCAmelCase__ : Any = [trans(img.convert("""RGB""" ) ) for img in image] lowerCAmelCase__ : str = torch.stack(UpperCamelCase ) return image class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[Any]: super().__init__() # make sure scheduler can always be converted to DDIM lowerCAmelCase__ : Dict = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=__UpperCAmelCase ,scheduler=__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any: if strength < 0 or strength > 1: raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple: # get the original timestep using init_timestep lowerCAmelCase__ : str = min(int(num_inference_steps * strength ) ,__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = max(num_inference_steps - init_timestep ,0 ) lowerCAmelCase__ : Optional[Any] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=None ) -> List[str]: if not isinstance(__UpperCAmelCase ,(torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__UpperCAmelCase )}""" ) lowerCAmelCase__ : Tuple = image.to(device=__UpperCAmelCase ,dtype=__UpperCAmelCase ) if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) and len(__UpperCAmelCase ) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(__UpperCAmelCase )}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) lowerCAmelCase__ : Union[str, Any] = init_latents.shape lowerCAmelCase__ : List[str] = randn_tensor(__UpperCAmelCase ,generator=__UpperCAmelCase ,device=__UpperCAmelCase ,dtype=__UpperCAmelCase ) # get latents print("""add noise to latents at timestep""" ,__UpperCAmelCase ) lowerCAmelCase__ : Dict = self.scheduler.add_noise(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) lowerCAmelCase__ : Dict = init_latents return latents @torch.no_grad() def __call__( self ,__UpperCAmelCase = None ,__UpperCAmelCase = 0.8 ,__UpperCAmelCase = 1 ,__UpperCAmelCase = None ,__UpperCAmelCase = 0.0 ,__UpperCAmelCase = 50 ,__UpperCAmelCase = None ,__UpperCAmelCase = "pil" ,__UpperCAmelCase = True ,) -> Union[ImagePipelineOutput, Tuple]: self.check_inputs(__UpperCAmelCase ) # 2. Preprocess image lowerCAmelCase__ : List[str] = preprocess(__UpperCAmelCase ) # 3. 
set timesteps self.scheduler.set_timesteps(__UpperCAmelCase ,device=self.device ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.get_timesteps(__UpperCAmelCase ,__UpperCAmelCase ,self.device ) lowerCAmelCase__ : Optional[int] = timesteps[:1].repeat(__UpperCAmelCase ) # 4. Prepare latent variables lowerCAmelCase__ : Tuple = self.prepare_latents(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,self.unet.dtype ,self.device ,__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = latents # 5. Denoising loop for t in self.progress_bar(__UpperCAmelCase ): # 1. predict noise model_output lowerCAmelCase__ : str = self.unet(__UpperCAmelCase ,__UpperCAmelCase ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCAmelCase__ : str = self.scheduler.step( __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,eta=__UpperCAmelCase ,use_clipped_model_output=__UpperCAmelCase ,generator=__UpperCAmelCase ,).prev_sample lowerCAmelCase__ : Optional[Any] = (image / 2 + 0.5).clamp(0 ,1 ) lowerCAmelCase__ : Optional[int] = image.cpu().permute(0 ,2 ,3 ,1 ).numpy() if output_type == "pil": lowerCAmelCase__ : List[str] = self.numpy_to_pil(__UpperCAmelCase ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=__UpperCAmelCase )
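A minimal usage sketch for the image-to-image DDIM pipeline defined above. The class and parameter names are obfuscated in the snippet, so `Img2ImgDDIMPipeline` stands in for the class, the checkpoint path is a placeholder, and the positional call simply follows the `__call__` signature shown (image, strength, batch_size, generator):

import PIL.Image
import torch
from diffusers import DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("path/to/unet")  # placeholder; any 256x256 unconditional UNet
scheduler = DDIMScheduler()
pipe = Img2ImgDDIMPipeline(unet, scheduler)         # stand-in name for the class defined above

init_image = PIL.Image.open("input.png")            # resized to 256x256 by `preprocess`
output = pipe(init_image, 0.75, 1, torch.manual_seed(0))
output.images[0].save("output.png")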
37
"""simple docstring""" import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def _A (__a , __a , __a ) -> Dict: """simple docstring""" if gpta_config_file == "": SCREAMING_SNAKE_CASE_ : Optional[Any] = GPTaConfig() else: SCREAMING_SNAKE_CASE_ : Tuple = GPTaConfig.from_json_file(__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = GPTaModel(__a ) # Load weights from numpy load_tf_weights_in_gpta(__a , __a , __a ) # Save pytorch-model SCREAMING_SNAKE_CASE_ : List[str] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME SCREAMING_SNAKE_CASE_ : List[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(f'Save PyTorch model to {pytorch_weights_dump_path}' ) torch.save(model.state_dict() , __a ) print(f'Save configuration file to {pytorch_config_dump_path}' ) with open(__a , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCAmelCase_ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--gpt2_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained OpenAI model. \n""" """This specifies the model architecture.""" ), ) UpperCAmelCase_ : Union[str, Any] = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
91
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]


if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
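The file above registers its submodules with `_LazyModule`, so the heavy imports only run when an attribute is first accessed. A stripped-down sketch of the same idea using module-level `__getattr__` (PEP 562); the names here are illustrative and not part of the library:

import importlib

_lazy_imports = {"configuration_mobilenet_v2": ["MobileNetV2Config"]}


def __getattr__(name):
    # Import the owning submodule on first access, then hand back the attribute.
    for module_name, exported_names in _lazy_imports.items():
        if name in exported_names:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")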
38
"""simple docstring""" from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
91
0
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __lowerCamelCase ( snake_case__): """simple docstring""" @slow @require_torch def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' ) _UpperCAmelCase = BertTokenizer.from_pretrained('bert-base-uncased' ) _UpperCAmelCase = bertabert.config.encoder.vocab_size _UpperCAmelCase = tokenizer.sep_token_id _UpperCAmelCase = tokenizer.cls_token_id _UpperCAmelCase = 128 _UpperCAmelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' ) _UpperCAmelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' ) _UpperCAmelCase = train_dataset.select(range(32 ) ) _UpperCAmelCase = val_dataset.select(range(16 ) ) _UpperCAmelCase = 4 def _map_to_encoder_decoder_inputs(UpperCAmelCase ): # Tokenizer will automatically set [BOS] <text> [EOS] _UpperCAmelCase = tokenizer(batch['article'] , padding='max_length' , truncation=UpperCAmelCase , max_length=512 ) _UpperCAmelCase = tokenizer(batch['highlights'] , padding='max_length' , truncation=UpperCAmelCase , max_length=128 ) _UpperCAmelCase = inputs.input_ids _UpperCAmelCase = inputs.attention_mask _UpperCAmelCase = outputs.input_ids _UpperCAmelCase = outputs.input_ids.copy() _UpperCAmelCase = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels'] ] _UpperCAmelCase = outputs.attention_mask assert all(len(UpperCAmelCase ) == 512 for x in inputs.input_ids ) assert all(len(UpperCAmelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCAmelCase ): _UpperCAmelCase = pred.label_ids _UpperCAmelCase = pred.predictions # all unnecessary tokens are removed _UpperCAmelCase = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase ) _UpperCAmelCase = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase ) _UpperCAmelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase ) )] ) / len(UpperCAmelCase ) return {"accuracy": accuracy} # map train dataset _UpperCAmelCase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase , batch_size=UpperCAmelCase , remove_columns=['article', 'highlights'] , ) train_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) # same for validation dataset _UpperCAmelCase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase , batch_size=UpperCAmelCase , remove_columns=['article', 'highlights'] , ) val_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) _UpperCAmelCase = self.get_auto_remove_tmp_dir() _UpperCAmelCase = SeqaSeqTrainingArguments( output_dir=UpperCAmelCase , per_device_train_batch_size=UpperCAmelCase , per_device_eval_batch_size=UpperCAmelCase , predict_with_generate=UpperCAmelCase , evaluation_strategy='steps' , do_train=UpperCAmelCase , do_eval=UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer _UpperCAmelCase = SeqaSeqTrainer( model=UpperCAmelCase , args=UpperCAmelCase , compute_metrics=_compute_metrics , 
train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , tokenizer=UpperCAmelCase , ) # start training trainer.train()
39
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL UpperCAmelCase_ : int = logging.get_logger(__name__) def _A (__a ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(__a , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__a , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__a ): return [[videos]] raise ValueError(f'Could not make batched video from {videos}' ) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["pixel_values"] def __init__( self : Dict , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : Dict , ): '''simple docstring''' super().__init__(**lowercase_) SCREAMING_SNAKE_CASE_ : str = size if size is not None else {'''shortest_edge''': 256} SCREAMING_SNAKE_CASE_ : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(lowercase_ , param_name='''crop_size''') SCREAMING_SNAKE_CASE_ : Optional[int] = do_resize SCREAMING_SNAKE_CASE_ : List[Any] = size SCREAMING_SNAKE_CASE_ : Tuple = do_center_crop SCREAMING_SNAKE_CASE_ : Dict = crop_size SCREAMING_SNAKE_CASE_ : List[Any] = resample SCREAMING_SNAKE_CASE_ : List[str] = do_rescale SCREAMING_SNAKE_CASE_ : List[str] = rescale_factor SCREAMING_SNAKE_CASE_ : List[Any] = offset SCREAMING_SNAKE_CASE_ : List[Any] = do_normalize SCREAMING_SNAKE_CASE_ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN SCREAMING_SNAKE_CASE_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_) if "shortest_edge" in size: SCREAMING_SNAKE_CASE_ : List[Any] = get_resize_output_image_size(lowercase_ , size['''shortest_edge'''] , default_to_square=lowercase_) elif "height" in size and "width" in size: SCREAMING_SNAKE_CASE_ : Union[str, Any] = (size['''height'''], size['''width''']) else: raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}') return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = get_size_dict(lowercase_) if "height" not in size or "width" not in size: raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}') return center_crop(lowercase_ , size=(size['''height'''], size['''width''']) , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : bool = True , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[int] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = image.astype(np.floataa) if offset: SCREAMING_SNAKE_CASE_ : Tuple = image - (scale / 2) return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[str] , ): '''simple docstring''' return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ): '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''') if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''') if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''') # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE_ : List[str] = to_numpy_array(lowercase_) if do_resize: SCREAMING_SNAKE_CASE_ : List[Any] = self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) if do_center_crop: SCREAMING_SNAKE_CASE_ : Dict = self.center_crop(lowercase_ , size=lowercase_) if do_rescale: SCREAMING_SNAKE_CASE_ : int = self.rescale(image=lowercase_ , scale=lowercase_ , offset=lowercase_) if do_normalize: SCREAMING_SNAKE_CASE_ : Dict = self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = to_channel_dimension_format(lowercase_ , lowercase_) return image def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Optional[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE_ : int = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE_ : Dict = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE_ : Dict = offset if offset is not None else self.offset SCREAMING_SNAKE_CASE_ : str = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE_ : Dict = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE_ : List[str] = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else self.size SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_) SCREAMING_SNAKE_CASE_ : Any = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(lowercase_ , param_name='''crop_size''') if not valid_images(lowercase_): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') SCREAMING_SNAKE_CASE_ : Tuple = make_batched(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = [ [ self._preprocess_image( image=lowercase_ , do_resize=lowercase_ , size=lowercase_ , resample=lowercase_ , do_center_crop=lowercase_ , crop_size=lowercase_ , do_rescale=lowercase_ , rescale_factor=lowercase_ , offset=lowercase_ , do_normalize=lowercase_ , image_mean=lowercase_ , image_std=lowercase_ , data_format=lowercase_ , ) for img in video ] for video in videos ] SCREAMING_SNAKE_CASE_ : int = {'''pixel_values''': videos} return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
91
0