Dataset schema (per row):

  code                     string   (length 87 - 55.2k)
  code_codestyle           int64    (0 - 349)
  style_context            string   (length 135 - 49.1k)
  style_context_codestyle  int64    (0 - 349)
  label                    int64    (0 - 1)
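To make the rows easier to inspect, here is a minimal sketch using the Hugging Face `datasets` library. The dataset path is a placeholder, and reading `label` as a style match/mismatch indicator is an assumption inferred from the schema above, not something the dump states.

```python
# Minimal inspection sketch. "path/to/dataset" is a placeholder, and the
# interpretation of `label` below is an assumption, not documented in the dump.
from datasets import load_dataset

ds = load_dataset("path/to/dataset", split="train")
row = ds[0]
print(row["code"][:200])               # code snippet (string, 87 - 55.2k chars)
print(row["code_codestyle"])           # integer style id in [0, 349]
print(row["style_context"][:200])      # reference snippet in some style
print(row["style_context_codestyle"])  # its style id
print(row["label"])                    # 0 or 1, presumably style match/mismatch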
"""simple docstring""" import argparse import os import re _A = """src/diffusers""" # Pattern that looks at the indentation in a line. _A = re.compile(r"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. _A = re.compile(r"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. _A = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. _A = re.compile(r"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. _A = re.compile(r"""\[([^\]]+)\]""") def lowercase_ ( __UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : Union[str, Any] = _re_indent.search(SCREAMING_SNAKE_CASE_ ) return "" if search is None else search.groups()[0] def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase="" , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> int: lowerCAmelCase__ : Tuple = 0 lowerCAmelCase__ : List[str] = code.split("""\n""" ) if start_prompt is not None: while not lines[index].startswith(SCREAMING_SNAKE_CASE_ ): index += 1 lowerCAmelCase__ : int = ["""\n""".join(lines[:index] )] else: lowerCAmelCase__ : Union[str, Any] = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). lowerCAmelCase__ : str = [lines[index]] index += 1 while index < len(SCREAMING_SNAKE_CASE_ ) and (end_prompt is None or not lines[index].startswith(SCREAMING_SNAKE_CASE_ )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(SCREAMING_SNAKE_CASE_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ): current_block.append(lines[index] ) blocks.append("""\n""".join(SCREAMING_SNAKE_CASE_ ) ) if index < len(SCREAMING_SNAKE_CASE_ ) - 1: lowerCAmelCase__ : int = [lines[index + 1]] index += 1 else: lowerCAmelCase__ : Union[str, Any] = [] else: blocks.append("""\n""".join(SCREAMING_SNAKE_CASE_ ) ) lowerCAmelCase__ : str = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(SCREAMING_SNAKE_CASE_ ) > 0: blocks.append("""\n""".join(SCREAMING_SNAKE_CASE_ ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(SCREAMING_SNAKE_CASE_ ): blocks.append("""\n""".join(lines[index:] ) ) return blocks def lowercase_ ( __UpperCAmelCase ) -> int: def _inner(__UpperCAmelCase ): return key(SCREAMING_SNAKE_CASE_ ).lower().replace("""_""" , """""" ) return _inner def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase=None ) -> int: def noop(__UpperCAmelCase ): return x if key is None: lowerCAmelCase__ : Tuple = noop # Constants are all uppercase, they go first. lowerCAmelCase__ : Optional[Any] = [obj for obj in objects if key(SCREAMING_SNAKE_CASE_ ).isupper()] # Classes are not all uppercase but start with a capital, they go second. lowerCAmelCase__ : Optional[int] = [obj for obj in objects if key(SCREAMING_SNAKE_CASE_ )[0].isupper() and not key(SCREAMING_SNAKE_CASE_ ).isupper()] # Functions begin with a lowercase, they go last. 
lowerCAmelCase__ : List[str] = [obj for obj in objects if not key(SCREAMING_SNAKE_CASE_ )[0].isupper()] lowerCAmelCase__ : Tuple = ignore_underscore(SCREAMING_SNAKE_CASE_ ) return sorted(SCREAMING_SNAKE_CASE_ , key=SCREAMING_SNAKE_CASE_ ) + sorted(SCREAMING_SNAKE_CASE_ , key=SCREAMING_SNAKE_CASE_ ) + sorted(SCREAMING_SNAKE_CASE_ , key=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( __UpperCAmelCase ) -> str: def _replace(__UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = match.groups()[0] if "," not in imports: return f"""[{imports}]""" lowerCAmelCase__ : Optional[Any] = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowerCAmelCase__ : List[Any] = keys[:-1] return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE_ )] ) + "]" lowerCAmelCase__ : int = import_statement.split("""\n""" ) if len(SCREAMING_SNAKE_CASE_ ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. lowerCAmelCase__ : str = 2 if lines[1].strip() == """[""" else 1 lowerCAmelCase__ : Optional[int] = [(i, _re_strip_line.search(SCREAMING_SNAKE_CASE_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] lowerCAmelCase__ : str = sort_objects(SCREAMING_SNAKE_CASE_ , key=lambda __UpperCAmelCase : x[1] ) lowerCAmelCase__ : Optional[Any] = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(SCREAMING_SNAKE_CASE_ ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: lowerCAmelCase__ : str = _re_bracket_content.sub(_replace , lines[1] ) else: lowerCAmelCase__ : str = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowerCAmelCase__ : str = keys[:-1] lowerCAmelCase__ : List[str] = get_indent(lines[1] ) + """, """.join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE_ )] ) return "\n".join(SCREAMING_SNAKE_CASE_ ) else: # Finally we have to deal with imports fitting on one line lowerCAmelCase__ : List[Any] = _re_bracket_content.sub(_replace , SCREAMING_SNAKE_CASE_ ) return import_statement def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase=True ) -> List[Any]: with open(SCREAMING_SNAKE_CASE_ , """r""" ) as f: lowerCAmelCase__ : int = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 lowerCAmelCase__ : str = split_code_in_indented_blocks( SCREAMING_SNAKE_CASE_ , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(SCREAMING_SNAKE_CASE_ ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. lowerCAmelCase__ : Optional[Any] = main_blocks[block_idx] lowerCAmelCase__ : Dict = block.split("""\n""" ) # Get to the start of the imports. 
lowerCAmelCase__ : List[Any] = 0 while line_idx < len(SCREAMING_SNAKE_CASE_ ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: lowerCAmelCase__ : List[str] = len(SCREAMING_SNAKE_CASE_ ) else: line_idx += 1 if line_idx >= len(SCREAMING_SNAKE_CASE_ ): continue # Ignore beginning and last line: they don't contain anything. lowerCAmelCase__ : Tuple = """\n""".join(block_lines[line_idx:-1] ) lowerCAmelCase__ : str = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. lowerCAmelCase__ : int = split_code_in_indented_blocks(SCREAMING_SNAKE_CASE_ , indent_level=SCREAMING_SNAKE_CASE_ ) # We have two categories of import key: list or _import_structure[key].append/extend lowerCAmelCase__ : Dict = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. lowerCAmelCase__ : Optional[Any] = [(pattern.search(SCREAMING_SNAKE_CASE_ ).groups()[0] if pattern.search(SCREAMING_SNAKE_CASE_ ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. lowerCAmelCase__ : Optional[Any] = [(i, key) for i, key in enumerate(SCREAMING_SNAKE_CASE_ ) if key is not None] lowerCAmelCase__ : int = [x[0] for x in sorted(SCREAMING_SNAKE_CASE_ , key=lambda __UpperCAmelCase : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. lowerCAmelCase__ : Union[str, Any] = 0 lowerCAmelCase__ : List[str] = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: lowerCAmelCase__ : Dict = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(SCREAMING_SNAKE_CASE_ ) count += 1 # And we put our main block back together with its first and last line. lowerCAmelCase__ : str = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(SCREAMING_SNAKE_CASE_ ): if check_only: return True else: print(f"""Overwriting {file}.""" ) with open(SCREAMING_SNAKE_CASE_ , """w""" ) as f: f.write("""\n""".join(SCREAMING_SNAKE_CASE_ ) ) def lowercase_ ( __UpperCAmelCase=True ) -> Optional[Any]: lowerCAmelCase__ : int = [] for root, _, files in os.walk(SCREAMING_SNAKE_CASE_ ): if "__init__.py" in files: lowerCAmelCase__ : Union[str, Any] = sort_imports(os.path.join(SCREAMING_SNAKE_CASE_ , """__init__.py""" ) , check_only=SCREAMING_SNAKE_CASE_ ) if result: lowerCAmelCase__ : Dict = [os.path.join(SCREAMING_SNAKE_CASE_ , """__init__.py""" )] if len(SCREAMING_SNAKE_CASE_ ) > 0: raise ValueError(f"""Would overwrite {len(SCREAMING_SNAKE_CASE_ )} files, run `make style`.""" ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") _A = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
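To make the block-splitting behavior concrete, here is a small, hypothetical demonstration of `split_code_in_indented_blocks` (traced by hand against the logic above): a brace-closed structure is kept together with its closing line, while other top-level statements become their own blocks.

```python
# Hypothetical demo of split_code_in_indented_blocks defined above.
sample = '_import_structure = {\n    "b": [],\n    "a": [],\n}\nx = 1'
print(split_code_in_indented_blocks(sample))
# ['_import_structure = {\n    "b": [],\n    "a": [],\n}', 'x = 1']
```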
code_codestyle: 242
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
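As a toy illustration of the thresholding rule above (not part of the model): an image is flagged when any cosine similarity exceeds its learned per-concept threshold, and a "special care" hit tightens every threshold by 0.01.

```python
# Toy numbers only; the real thresholds are learned parameters.
import torch

cos_dist = torch.tensor([[0.20, 0.31]])      # similarities of one image to 2 concepts
thresholds = torch.tensor([0.25, 0.30])      # per-concept thresholds
special_adjustment = torch.tensor([[0.01]])  # nonzero once special care triggered
concept_scores = cos_dist - thresholds + special_adjustment  # [[-0.04, 0.02]]
print(torch.any(concept_scores > 0, dim=1))  # tensor([True]) -> flagged
```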
style_context_codestyle: 68
label: 0
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale PIL image: pixels above the global mean become 255, the rest 0."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
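An equivalent vectorized version is sketched below, assuming NumPy is available; it applies the same global-mean threshold without explicit pixel loops.

```python
import numpy as np
from PIL import Image

def mean_threshold_np(image: Image.Image) -> Image.Image:
    # Same global-mean binarization as above, done in one vectorized pass.
    arr = np.asarray(image, dtype=np.uint32)
    return Image.fromarray(np.where(arr > arr.mean(), 255, 0).astype(np.uint8))
```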
code_codestyle: 287
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
style_context_codestyle: 68
label: 0
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): a = '''pt''' elif is_tf_available(): a = '''tf''' else: a = '''jax''' class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Any = PerceiverTokenizer UpperCAmelCase : str = False def lowerCAmelCase_ ( self : Tuple ): super().setUp() _A = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowerCAmelCase_ ( self : Dict ): return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' ) def lowerCAmelCase_ ( self : List[Any] , **_UpperCAmelCase : Tuple ): return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str=False , _UpperCAmelCase : Any=20 , _UpperCAmelCase : Dict=5 ): _A = [] for i in range(len(_UpperCAmelCase ) ): try: _A = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) _A = list(filter(lambda _UpperCAmelCase : re.match(r'^[ a-zA-Z]+$' , t[1] ) , _UpperCAmelCase ) ) _A = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) ) if max_length is not None and len(_UpperCAmelCase ) > max_length: _A = toks[:max_length] if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0: while len(_UpperCAmelCase ) < min_length: _A = toks + toks # toks_str = [t[1] for t in toks] _A = [t[0] for t in toks] # Ensure consistency _A = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) if " " not in output_txt and len(_UpperCAmelCase ) > 1: _A = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase ) ) if with_prefix_space: _A = ' ' + output_txt _A = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) return output_txt, output_ids def lowerCAmelCase_ ( self : List[str] ): _A = self.perceiver_tokenizer _A = 'Unicode €.' 
_A = tokenizer(_UpperCAmelCase ) _A = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded['input_ids'] , _UpperCAmelCase ) # decoding _A = tokenizer.decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]' ) _A = tokenizer('e è é ê ë' ) _A = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded['input_ids'] , _UpperCAmelCase ) # decoding _A = tokenizer.decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' ) def lowerCAmelCase_ ( self : List[str] ): _A = self.perceiver_tokenizer _A = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off _A = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on _A = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) if FRAMEWORK != "jax": _A = list(batch.input_ids.numpy()[0] ) else: _A = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = self.perceiver_tokenizer _A = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _A = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , _UpperCAmelCase ) self.assertIn('attention_mask' , _UpperCAmelCase ) self.assertNotIn('decoder_input_ids' , _UpperCAmelCase ) self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase ) def lowerCAmelCase_ ( self : str ): _A = self.perceiver_tokenizer _A = [ 'Summary of the text.', 'Another summary.', ] _A = tokenizer( text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def lowerCAmelCase_ ( self : Tuple ): _A = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test _A = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _A = tempfile.mkdtemp() _A = ' He is very happy, UNwant\u00E9d,running' _A = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) tokenizer.save_pretrained(_UpperCAmelCase ) _A = tokenizer.__class__.from_pretrained(_UpperCAmelCase ) _A = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) shutil.rmtree(_UpperCAmelCase ) _A = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _A = tempfile.mkdtemp() _A = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) _A = tokenizer.additional_special_tokens 
additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) _A = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) tokenizer.save_pretrained(_UpperCAmelCase ) _A = tokenizer.__class__.from_pretrained(_UpperCAmelCase ) _A = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) _A = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _A = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: _A = json.load(_UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: _A = json.load(_UpperCAmelCase ) _A = [F'''<extra_id_{i}>''' for i in range(125 )] _A = added_tokens_extra_ids + [ 'an_additional_special_token' ] _A = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_UpperCAmelCase , _UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_UpperCAmelCase , _UpperCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _A = tokenizer_class.from_pretrained( _UpperCAmelCase , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _A = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase )] _A = tokenizer_class.from_pretrained( _UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , '�' ) def lowerCAmelCase_ ( self : List[str] ): pass def lowerCAmelCase_ ( self : Optional[Any] ): pass def lowerCAmelCase_ ( self : Optional[Any] ): pass def lowerCAmelCase_ ( self : List[Any] ): pass def lowerCAmelCase_ ( self : List[Any] ): _A = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _A = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]'] _A = tokenizer.convert_tokens_to_string(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
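The expected ids in the tests above follow a simple byte-level scheme, reproduced in the sketch below. The offset of 6 and the [CLS]=4/[SEP]=5 ids are inferred from the fixtures in this test, not quoted from the tokenizer source.

```python
def byte_ids(text: str, offset: int = 6) -> list:
    # UTF-8 bytes shifted past the special tokens, wrapped in [CLS]=4 ... [SEP]=5.
    return [4] + [b + offset for b in text.encode("utf-8")] + [5]

print(byte_ids("Unicode €."))
# [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
```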
code_codestyle: 315
import string


def decrypt(message: str) -> None:
    """Brute-force every possible Caesar-cipher key and print each candidate plaintext."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
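A quick sanity check of the shift arithmetic the decrypt loop relies on, using a hypothetical ciphertext: shifting "KHOOR" back by 3 recovers "HELLO", which the brute-force loop prints on its Key #3 line.

```python
cipher = "KHOOR"  # "HELLO" encrypted with key 3
plain = "".join(chr((ord(c) - ord("A") - 3) % 26 + ord("A")) for c in cipher)
assert plain == "HELLO"
```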
style_context_codestyle: 68
label: 0
"""simple docstring""" import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## A : Tuple = 1_6 A : Dict = 3_2 def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 16 ): '''simple docstring''' __lowerCAmelCase = AutoTokenizer.from_pretrained("bert-base-cased" ) __lowerCAmelCase = DatasetDict( { "train": dataset["train"].select(SCREAMING_SNAKE_CASE_ ), "validation": dataset["train"].select(SCREAMING_SNAKE_CASE_ ), "test": dataset["validation"], } ) def tokenize_function(_UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) __lowerCAmelCase = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCAmelCase = datasets.map( SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCAmelCase = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(_UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCAmelCase = 16 elif accelerator.mixed_precision != "no": __lowerCAmelCase = 8 else: __lowerCAmelCase = None return tokenizer.pad( SCREAMING_SNAKE_CASE_ , padding="longest" , max_length=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_tensors="pt" , ) # Instantiate dataloaders. 
__lowerCAmelCase = DataLoader( tokenized_datasets["train"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = DataLoader( tokenized_datasets["validation"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = DataLoader( tokenized_datasets["test"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ ) return train_dataloader, eval_dataloader, test_dataloader def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = [] # Download the dataset __lowerCAmelCase = load_dataset("glue" , "mrpc" ) # Create our splits __lowerCAmelCase = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator __lowerCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCAmelCase = config["lr"] __lowerCAmelCase = int(config["num_epochs"] ) __lowerCAmelCase = int(config["seed"] ) __lowerCAmelCase = int(config["batch_size"] ) __lowerCAmelCase = evaluate.load("glue" , "mrpc" ) # If the batch size is too big we use gradient accumulation __lowerCAmelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __lowerCAmelCase = batch_size // MAX_GPU_BATCH_SIZE __lowerCAmelCase = MAX_GPU_BATCH_SIZE set_seed(SCREAMING_SNAKE_CASE_ ) # New Code # # Create our folds: __lowerCAmelCase = kfold.split(np.zeros(datasets["train"].num_rows ) , datasets["train"]["label"] ) __lowerCAmelCase = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(SCREAMING_SNAKE_CASE_ ): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = get_fold_dataloaders( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=SCREAMING_SNAKE_CASE_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCAmelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCAmelCase = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ ) # Instantiate scheduler __lowerCAmelCase = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=100 , num_training_steps=(len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Now we train the model for epoch in range(SCREAMING_SNAKE_CASE_ ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __lowerCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = outputs.loss __lowerCAmelCase = loss / gradient_accumulation_steps accelerator.backward(SCREAMING_SNAKE_CASE_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowerCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = outputs.logits.argmax(dim=-1 ) __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , ) __lowerCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:" , SCREAMING_SNAKE_CASE_ ) # New Code # # We also run predictions on the test set at the very end __lowerCAmelCase = [] for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowerCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = outputs.logits __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch["labels"]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: __lowerCAmelCase = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 ) __lowerCAmelCase = torch.stack(SCREAMING_SNAKE_CASE_ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) __lowerCAmelCase = metric.compute(predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ ) accelerator.print("Average test metrics from all folds:" , SCREAMING_SNAKE_CASE_ ) def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) # New Code # parser.add_argument("--num_folds" , type=SCREAMING_SNAKE_CASE_ , default=3 , help="The number of splits to perform across the dataset" ) __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
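A toy illustration of the fold-ensembling step at the end of `training_function`: per-fold test logits are stacked, summed, averaged over the folds, and then argmaxed per example.

```python
import torch

fold_logits = [torch.tensor([[2.0, 0.0]]), torch.tensor([[0.0, 1.0]]), torch.tensor([[0.0, 2.0]])]
preds = torch.stack(fold_logits, dim=0).sum(dim=0).div(3).argmax(dim=-1)
print(preds)  # tensor([1]); averaged logits are [0.67, 1.0]
```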
code_codestyle: 57
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""") @require_sentencepiece @require_tokenizers class a__ ( snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = SpeechTaTokenizer __lowerCamelCase = False __lowerCamelCase = True def UpperCamelCase ( self ) -> Any: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ = SpeechTaTokenizer(lowercase ) A__ = AddedToken("<mask>" , lstrip=lowercase , rstrip=lowercase ) A__ = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase ( self , lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = "this is a test" A__ = "this is a test" return input_text, output_text def UpperCamelCase ( self , lowercase , lowercase=False , lowercase=20 , lowercase=5 ) -> Optional[Any]: '''simple docstring''' A__ , A__ = self.get_input_output_texts(lowercase ) A__ = tokenizer.encode(lowercase , add_special_tokens=lowercase ) A__ = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase ) return text, ids def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = "<pad>" A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-4] , "œ" ) self.assertEqual(vocab_keys[-2] , "<mask>" ) self.assertEqual(vocab_keys[-1] , "<ctc_blank>" ) self.assertEqual(len(lowercase ) , 81 ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ = self.get_tokenizers(do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) A__ = ["aaaaa bbbbbb", "cccccccccdddddddd"] A__ = tokenizer.add_tokens(lowercase ) A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , len(lowercase ) ) self.assertEqual(lowercase , all_size + len(lowercase ) ) A__ = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=lowercase ) self.assertGreaterEqual(len(lowercase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) A__ = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} A__ = tokenizer.add_special_tokens(lowercase ) A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) self.assertEqual(lowercase , lowercase ) 
self.assertEqual(lowercase , len(lowercase ) ) self.assertEqual(lowercase , all_size_a + len(lowercase ) ) A__ = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=lowercase ) self.assertGreaterEqual(len(lowercase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' pass def UpperCamelCase ( self ) -> Any: '''simple docstring''' pass def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = self.get_tokenizer() A__ = tokenizer.tokenize("This is a test" ) # fmt: off self.assertListEqual(lowercase , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) A__ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( lowercase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) A__ = tokenizer.convert_tokens_to_ids(lowercase ) # fmt: off self.assertListEqual(lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on A__ = tokenizer.convert_ids_to_tokens(lowercase ) self.assertListEqual( lowercase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) @slow def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = [ "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides " "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural " "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained " "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.", "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox jumps over the lazy dog.", ] # fmt: off A__ = { "input_ids": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=lowercase , )
style_context_codestyle: 68
label: 0
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override this in a subclass
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]

        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i for i in range(self.vertices_count) if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(f"maximum flow is {maximum_flow}")
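As an independent cross-check of the sample network above (a sketch, not part of the original module): a minimal Edmonds-Karp implementation finds the same answer, since the only augmenting path 0 -> 1 -> 2 -> 3 is capped by its smallest edge capacity, 6.

```python
from collections import deque

def edmonds_karp(capacity, s, t):
    # Repeatedly find a shortest augmenting path with BFS and saturate it.
    n = len(capacity)
    residual = [row[:] for row in capacity]
    flow = 0
    while True:
        parent = [-1] * n
        parent[s] = s
        queue = deque([s])
        while queue and parent[t] == -1:
            u = queue.popleft()
            for v in range(n):
                if residual[u][v] > 0 and parent[v] == -1:
                    parent[v] = u
                    queue.append(v)
        if parent[t] == -1:
            return flow
        # bottleneck capacity along the path found
        bottleneck, v = float("inf"), t
        while v != s:
            bottleneck = min(bottleneck, residual[parent[v]][v])
            v = parent[v]
        # apply the augmentation
        v = t
        while v != s:
            u = parent[v]
            residual[u][v] -= bottleneck
            residual[v][u] += bottleneck
            v = u
        flow += bottleneck

print(edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3))  # 6
```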
code_codestyle: 58
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.

For more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).

The abbreviation FSMT stands for FairSeqMachineTranslation

All four models are available:

* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)

## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}

```

#### Limitations and bias

- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)

## Training data

Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).

## Eval results

pair   | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}

The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.

## Data Sources

- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)


### BibTeX entry and citation info

```bibtex
@inproceedings{{...,
  year={{2020}},
  title={{Facebook FAIR's WMT19 News Translation Task Submission}},
  author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
  booktitle={{Proc. of WMT}},
}}
```


## TODO

- port model ensemble (fairseq uses 4 model checkpoints)

"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
from ..utils import DummyObject, requires_backends


# Dummy placeholder for the note_seq-gated MidiProcessor (class and method names
# restored from diffusers' dummy-object convention; the mangled source obscured them).
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
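# Why this pattern exists, as a sketch (assumes note_seq is not installed; the
# module path is the one diffusers conventionally generates for this backend
# combination): the import always succeeds, failure is deferred to first use.
from diffusers.utils.dummy_transformers_and_torch_and_note_seq_objects import MidiProcessor

try:
    MidiProcessor()  # the DummyObject metaclass intercepts instantiation
except ImportError as err:
    print(err)  # the message names the backends that must be installed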
from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy lowerCAmelCase__ = logging.get_logger(__name__) class a__ ( snake_case ): """simple docstring""" def __init__( self , lowercase , lowercase , lowercase , **lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = feature_size A__ = sampling_rate A__ = padding_value A__ = kwargs.pop("padding_side" , "right" ) A__ = kwargs.pop("return_attention_mask" , lowercase ) super().__init__(**lowercase ) def UpperCamelCase ( self , lowercase , lowercase = True , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , ) -> BatchFeature: '''simple docstring''' if isinstance(lowercase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): A__ = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F' to this method that includes {self.model_input_names[0]}, but you provided' F' {list(processed_features.keys() )}' ) A__ = processed_features[self.model_input_names[0]] A__ = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowercase ) == 0: if return_attention_mask: A__ = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch A__ = required_input[0] if isinstance(lowercase , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. A__ = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowercase ): A__ = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowercase ): A__ = "tf" elif is_torch_tensor(lowercase ): A__ = "pt" elif isinstance(lowercase , (int, float, list, tuple, np.ndarray) ): A__ = "np" else: raise ValueError( F'type of {first_element} unknown: {type(lowercase )}. ' "Should be one of a python, numpy, pytorch or tensorflow object." ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): A__ = to_numpy(lowercase ) else: A__ = [to_numpy(lowercase ) for v in value] # Convert padding_strategy in PaddingStrategy A__ = self._get_padding_strategies(padding=lowercase , max_length=lowercase ) A__ = processed_features[self.model_input_names[0]] A__ = len(lowercase ) if not all(len(lowercase ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." 
) A__ = [] for i in range(lowercase ): A__ = {k: v[i] for k, v in processed_features.items()} # truncation A__ = self._truncate( lowercase , max_length=lowercase , pad_to_multiple_of=lowercase , truncation=lowercase , ) truncated_inputs.append(lowercase ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length A__ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) A__ = PaddingStrategy.MAX_LENGTH A__ = {} for i in range(lowercase ): # padding A__ = self._pad( truncated_inputs[i] , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , ) for key, value in outputs.items(): if key not in batch_outputs: A__ = [] if value.dtype is np.dtype(np.floataa ): A__ = value.astype(np.floataa ) batch_outputs[key].append(lowercase ) return BatchFeature(lowercase , tensor_type=lowercase ) def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = PaddingStrategy.DO_NOT_PAD , lowercase = None , lowercase = None , ) -> dict: '''simple docstring''' A__ = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: A__ = len(lowercase ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of A__ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase ) < max_length if return_attention_mask and "attention_mask" not in processed_features: A__ = np.ones(len(lowercase ) , dtype=np.intaa ) if needs_to_be_padded: A__ = max_length - len(lowercase ) if self.padding_side == "right": if return_attention_mask: A__ = np.pad( processed_features["attention_mask"] , (0, difference) ) A__ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) A__ = np.pad( lowercase , lowercase , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: A__ = np.pad( processed_features["attention_mask"] , (difference, 0) ) A__ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) A__ = np.pad( lowercase , lowercase , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , ) -> Union[str, Any]: '''simple docstring''' if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." 
) A__ = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of A__ = len(lowercase ) > max_length if needs_to_be_truncated: A__ = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: A__ = processed_features["attention_mask"][:max_length] return processed_features def UpperCamelCase ( self , lowercase=False , lowercase=None ) -> Any: '''simple docstring''' if padding is not False: if padding is True: A__ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowercase , lowercase ): A__ = PaddingStrategy(lowercase ) elif isinstance(lowercase , lowercase ): A__ = padding else: A__ = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
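# Usage sketch for the padding machinery above, assuming a concrete subclass such
# as Wav2Vec2FeatureExtractor (any SequenceFeatureExtractor subclass behaves the
# same way): ragged inputs are right-padded with `padding_value` and the attention
# mask marks real samples with 1.
from transformers import Wav2Vec2FeatureExtractor  # assumed concrete subclass

extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
batch = {"input_values": [[0.1, 0.2, 0.3], [0.4]]}
padded = extractor.pad(batch, padding=True, return_attention_mask=True, return_tensors="np")

assert padded["input_values"].shape == (2, 3)                       # padded to longest
assert padded["attention_mask"].tolist() == [[1, 1, 1], [1, 0, 0]]  # 1 = real sample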
import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def a ( A__ : List[str] ) -> List[Any]: """simple docstring""" return EnvironmentCommand() def a ( A__ : Union[str, Any] ) -> Tuple: """simple docstring""" return EnvironmentCommand(args.accelerate_config_file ) class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ): @staticmethod def A__ ( lowerCAmelCase ) -> List[Any]: '''simple docstring''' _lowercase =parser.add_parser('env' ) download_parser.set_defaults(func=lowerCAmelCase ) download_parser.add_argument( '--accelerate-config_file' , default=lowerCAmelCase , help='The accelerate config file to use for the default values in the launching script.' , ) download_parser.set_defaults(func=lowerCAmelCase ) def __init__( self , lowerCAmelCase , *lowerCAmelCase ) -> None: '''simple docstring''' _lowercase =accelerate_config_file def A__ ( self ) -> int: '''simple docstring''' _lowercase ='not installed' if is_safetensors_available(): import safetensors _lowercase =safetensors.__version__ elif importlib.util.find_spec('safetensors' ) is not None: import safetensors _lowercase =F'''{safetensors.__version__} but is ignored because of PyTorch version too old.''' _lowercase ='not installed' _lowercase =_lowercase ='not found' if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file _lowercase =accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(lowerCAmelCase ): _lowercase =load_config_from_file(self._accelerate_config_file ).to_dict() _lowercase =( '\n'.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else F'''\t{accelerate_config}''' ) _lowercase ='not installed' _lowercase ='NA' if is_torch_available(): import torch _lowercase =torch.__version__ _lowercase =torch.cuda.is_available() _lowercase ='not installed' _lowercase ='NA' if is_tf_available(): import tensorflow as tf _lowercase =tf.__version__ try: # deprecated in v2.1 _lowercase =tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool _lowercase =bool(tf.config.list_physical_devices('GPU' ) ) _lowercase ='not installed' _lowercase ='not installed' _lowercase ='not installed' _lowercase ='NA' if is_flax_available(): import flax import jax import jaxlib _lowercase =flax.__version__ _lowercase =jax.__version__ _lowercase =jaxlib.__version__ _lowercase =jax.lib.xla_bridge.get_backend().platform _lowercase ={ '`transformers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'Huggingface_hub version': huggingface_hub.__version__, 'Safetensors version': F'''{safetensors_version}''', 'Accelerate version': F'''{accelerate_version}''', 'Accelerate config': F'''{accelerate_config_str}''', 'PyTorch version (GPU?)': F'''{pt_version} ({pt_cuda_available})''', 'Tensorflow version (GPU?)': F'''{tf_version} ({tf_cuda_available})''', 'Flax version (CPU?/GPU?/TPU?)': F'''{flax_version} ({jax_backend})''', 'Jax version': F'''{jax_version}''', 'JaxLib version': F'''{jaxlib_version}''', 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>', } 
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' ) print(self.format_dict(lowerCAmelCase ) ) return info @staticmethod def A__ ( lowerCAmelCase ) -> Optional[int]: '''simple docstring''' return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCAmelCase__ = { """configuration_groupvit""": [ """GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GroupViTConfig""", """GroupViTOnnxConfig""", """GroupViTTextConfig""", """GroupViTVisionConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GroupViTModel""", """GroupViTPreTrainedModel""", """GroupViTTextModel""", """GroupViTVisionModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFGroupViTModel""", """TFGroupViTPreTrainedModel""", """TFGroupViTTextModel""", """TFGroupViTVisionModel""", ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
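# The _import_structure / _LazyModule machinery above defers every heavy submodule
# import until an attribute is first touched. A minimal standalone sketch of the
# same idea (PEP 562-style __getattr__; not transformers' actual implementation,
# and it assumes the module lives inside a real package so relative imports work):
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [exported names]} into {exported name: submodule}
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value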
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A: List[Any] = { "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"], "tokenization_tapas": ["TapasTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A: str = [ "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST", "TapasForMaskedLM", "TapasForQuestionAnswering", "TapasForSequenceClassification", "TapasModel", "TapasPreTrainedModel", "load_tf_weights_in_tapas", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A: Tuple = [ "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST", "TFTapasForMaskedLM", "TFTapasForQuestionAnswering", "TFTapasForSequenceClassification", "TFTapasModel", "TFTapasPreTrainedModel", ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys A: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
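# Short usage sketch (assumes the class is exported from transformers as in the
# upstream library): configs are plain serializable containers, so overriding
# one field leaves the other defaults above intact.
from transformers import GPTNeoXJapaneseConfig  # assumed public export

config = GPTNeoXJapaneseConfig(num_hidden_layers=12)  # override a single default
assert config.hidden_size == 2560                     # other defaults unchanged
print(config.to_json_string())  # serialization inherited from PretrainedConfig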
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ : Any = { 'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'], 'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Dict = ['BertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Any = [ 'BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BertForMaskedLM', 'BertForMultipleChoice', 'BertForNextSentencePrediction', 'BertForPreTraining', 'BertForQuestionAnswering', 'BertForSequenceClassification', 'BertForTokenClassification', 'BertLayer', 'BertLMHeadModel', 'BertModel', 'BertPreTrainedModel', 'load_tf_weights_in_bert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Dict = [ 'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFBertEmbeddings', 'TFBertForMaskedLM', 'TFBertForMultipleChoice', 'TFBertForNextSentencePrediction', 'TFBertForPreTraining', 'TFBertForQuestionAnswering', 'TFBertForSequenceClassification', 'TFBertForTokenClassification', 'TFBertLMHeadModel', 'TFBertMainLayer', 'TFBertModel', 'TFBertPreTrainedModel', ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Dict = ['TFBertTokenizer'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : int = [ 'FlaxBertForCausalLM', 'FlaxBertForMaskedLM', 'FlaxBertForMultipleChoice', 'FlaxBertForNextSentencePrediction', 'FlaxBertForPreTraining', 'FlaxBertForQuestionAnswering', 'FlaxBertForSequenceClassification', 'FlaxBertForTokenClassification', 'FlaxBertModel', 'FlaxBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable) -> Callable:
    # Decorator name and warning class restored from the mangled original.
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
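# Usage sketch for the decorator above (function name illustrative):
@experimental
def fancy_new_feature(x: int) -> int:
    return x * 2


fancy_new_feature(3)
# UserWarning: 'fancy_new_feature' is experimental and might be subject to
# breaking changes in the future.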
"""simple docstring""" class _a : """simple docstring""" def __init__( self : Optional[int] )->None: _UpperCAmelCase = {} # Mapping from char to TrieNode _UpperCAmelCase = False def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : List[Any] )->None: for word in words: self.insert(__UpperCamelCase ) def lowercase__ ( self : str , __UpperCamelCase : List[Any] )->None: _UpperCAmelCase = self for char in word: if char not in curr.nodes: _UpperCAmelCase = TrieNode() _UpperCAmelCase = curr.nodes[char] _UpperCAmelCase = True def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Union[str, Any] )->bool: _UpperCAmelCase = self for char in word: if char not in curr.nodes: return False _UpperCAmelCase = curr.nodes[char] return curr.is_leaf def lowercase__ ( self : List[str] , __UpperCamelCase : Dict )->None: def _delete(__UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ) -> bool: if index == len(__UpperCamelCase ): # If word does not exist if not curr.is_leaf: return False _UpperCAmelCase = False return len(curr.nodes ) == 0 _UpperCAmelCase = word[index] _UpperCAmelCase = curr.nodes.get(__UpperCamelCase ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted _UpperCAmelCase = _delete(__UpperCamelCase , __UpperCamelCase , index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self , __UpperCamelCase , 0 ) def lowercase ( _SCREAMING_SNAKE_CASE : TrieNode , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' if node.is_leaf: print(SCREAMING_SNAKE_CASE_ , end=''' ''' ) for key, value in node.nodes.items(): print_words(SCREAMING_SNAKE_CASE_ , word + key ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = '''banana bananas bandana band apple all beast'''.split() _UpperCAmelCase = TrieNode() root.insert_many(SCREAMING_SNAKE_CASE_ ) # print_words(root, "") assert all(root.find(SCREAMING_SNAKE_CASE_ ) for word in words ) assert root.find('''banana''' ) assert not root.find('''bandanas''' ) assert not root.find('''apps''' ) assert root.find('''apple''' ) assert root.find('''all''' ) root.delete('''all''' ) assert not root.find('''all''' ) root.delete('''banana''' ) assert not root.find('''banana''' ) assert root.find('''bananas''' ) return True def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : bool ): '''simple docstring''' print(str(SCREAMING_SNAKE_CASE_ ) , '''works!''' if passes else '''doesn\'t work :(''' ) def lowercase ( ): '''simple docstring''' assert test_trie() def lowercase ( ): '''simple docstring''' print_results('''Testing trie functionality''' , test_trie() ) if __name__ == "__main__": main()
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) lowerCAmelCase__ = """\ Text data. Second line of data.""" lowerCAmelCase__ = """file""" @pytest.fixture(scope="session" ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> Optional[int]: '''simple docstring''' A__ = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd") A__ = bytes(SCREAMING_SNAKE_CASE_ , "utf-8" ) with zstd.open(SCREAMING_SNAKE_CASE_ , "wb" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any ) -> List[str]: '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , SCREAMING_SNAKE_CASE_ ) , "w" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return FILE_PATH @pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: int ) -> Any: '''simple docstring''' A__ = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path} A__ = input_paths[compression_format] A__ = tmp_path / "cache" A__ = DownloadConfig(cache_dir=SCREAMING_SNAKE_CASE_ , extract_compressed_file=SCREAMING_SNAKE_CASE_ ) A__ = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ ) as f: A__ = f.read() with open(SCREAMING_SNAKE_CASE_ ) as f: A__ = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("default_extracted" , [True, False] ) @pytest.mark.parametrize("default_cache_dir" , [True, False] ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: str ) -> Dict: '''simple docstring''' A__ = "custom_cache" A__ = "custom_extracted_dir" A__ = tmp_path / "custom_extracted_path" if default_extracted: A__ = ("downloads" if default_cache_dir else custom_cache_dir, "extracted") else: monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , SCREAMING_SNAKE_CASE_ ) monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(SCREAMING_SNAKE_CASE_ ) ) A__ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) A__ = xz_file A__ = ( DownloadConfig(extract_compressed_file=SCREAMING_SNAKE_CASE_ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE_ ) ) A__ = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ ) assert Path(SCREAMING_SNAKE_CASE_ ).parent.parts[-2:] == expected def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> Optional[int]: '''simple docstring''' A__ = str(Path(SCREAMING_SNAKE_CASE_ ).resolve() ) assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file # relative path A__ = str(Path(SCREAMING_SNAKE_CASE_ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[str]: '''simple docstring''' A__ = str(tmp_path.resolve() / "__missing_file__.txt" ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): 
cached_path(SCREAMING_SNAKE_CASE_ ) # relative path A__ = "./__missing_file__.txt" with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> Union[str, Any]: '''simple docstring''' A__ = get_from_cache(F'tmp://{tmpfs_file}' ) with open(SCREAMING_SNAKE_CASE_ ) as f: A__ = f.read() assert output_file_content == FILE_CONTENT @patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( ) -> List[Any]: '''simple docstring''' with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> int: '''simple docstring''' A__ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(SCREAMING_SNAKE_CASE_ ): http_get("https://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): http_head("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[Any]: '''simple docstring''' A__ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(SCREAMING_SNAKE_CASE_ ): ftp_get("ftp://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): ftp_head("ftp://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> str: '''simple docstring''' A__ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(SCREAMING_SNAKE_CASE_ ): fsspec_get("s3://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): fsspec_head("s3://huggingface.co" )
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase =get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class a__ ( UpperCAmelCase__ , unittest.TestCase ): lowerCamelCase : Optional[int] =ReformerTokenizer lowerCamelCase : Union[str, Any] =ReformerTokenizerFast lowerCamelCase : Any =True lowerCamelCase : Tuple =False lowerCamelCase : List[Any] =True def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" super().setUp() __lowerCamelCase = ReformerTokenizer(a , keep_accents=a ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" __lowerCamelCase = '''<s>''' __lowerCamelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" __lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(a ) , 10_00 ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" if not self.test_rust_tokenizer: return __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = '''I was born in 92000, and this is falsé.''' __lowerCamelCase = tokenizer.tokenize(a ) __lowerCamelCase = rust_tokenizer.tokenize(a ) self.assertListEqual(a , a ) __lowerCamelCase = tokenizer.encode(a , add_special_tokens=a ) __lowerCamelCase = rust_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = tokenizer.encode(a ) __lowerCamelCase = rust_tokenizer.encode(a ) self.assertListEqual(a , a ) def SCREAMING_SNAKE_CASE__ ( self : Any , a : Optional[int]=15 ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __lowerCamelCase = self.rust_tokenizer_class.from_pretrained(a , **a ) # Simple input __lowerCamelCase = '''This is a simple input''' __lowerCamelCase = ['''This is a simple input 1''', '''This is a simple input 2'''] __lowerCamelCase = ('''This is a simple input''', '''This is a pair''') __lowerCamelCase = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='''max_length''' ) # Simple input self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='''max_length''' ) # Simple input self.assertRaises( a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='''max_length''' , ) # Pair input self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='''max_length''' ) # Pair input self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='''max_length''' ) # Pair input self.assertRaises( a , 
tokenizer_r.batch_encode_plus , a , max_length=a , padding='''max_length''' , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" __lowerCamelCase = ReformerTokenizer(a , keep_accents=a ) __lowerCamelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a ) , [2_85, 46, 10, 1_70, 3_82] , ) __lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( a , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __lowerCamelCase = tokenizer.convert_tokens_to_ids(a ) self.assertListEqual( a , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(a ) self.assertListEqual( a , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" __lowerCamelCase = '''Hello World!''' __lowerCamelCase = [1_26, 32, 2_62, 1_52, 38, 72, 2_87] self.assertListEqual(a , self.big_tokenizer.encode(a ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" __lowerCamelCase = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) __lowerCamelCase = [ 1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 35, 28, 2_75, 3, 2_59, 2_97, 2_60, 84, 4, 35, 1_10, 44, 8, 2_59, 91, 2_68, 21, 11, 2_09, 2_74, 1_09, 2_66, 2_77, 1_17, 86, 93, 3_15, 2_58, 2_78, 2_58, 2_77, 2_58, 0, 2_58, 2_88, 2_58, 3_19, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 2_87, 2_58, 3_15, 2_58, 2_89, 2_58, 2_78, 99, 2_69, 2_66, 2_62, 8, 2_59, 2_41, 4, 2_17, 2_30, 2_68, 2_66, 55, 1_68, 1_06, 75, 1_93, 2_66, 2_23, 27, 49, 26, 2_82, 25, 2_64, 2_99, 19, 26, 0, 2_58, 2_77, 1_17, 86, 93, 1_76, 1_83, 2_70, 11, 2_62, 42, 61, 2_65, ] self.assertListEqual(a , self.big_tokenizer.encode(a ) ) @require_torch @slow def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" import torch from transformers import ReformerConfig, ReformerModel # Build sequence __lowerCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:10] __lowerCamelCase = ''' '''.join(a ) __lowerCamelCase = self.big_tokenizer.encode_plus(a , return_tensors='''pt''' ) __lowerCamelCase = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' ) __lowerCamelCase = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) __lowerCamelCase = encoded_sequence['''input_ids'''].shape __lowerCamelCase = ReformerModel(a ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**a ) model(**a ) @slow def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" __lowerCamelCase = {'''input_ids''': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 __lowerCamelCase = [ '''This is a very simple sentence.''', '''The quick brown fox jumps over the lazy dog.''', ] self.tokenizer_integration_test_util( expected_encoding=a , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=a , sequences=a , )
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class a__ : """simple docstring""" __lowerCamelCase = BlenderbotSmallConfig __lowerCamelCase = {} __lowerCamelCase = 'gelu' def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = eos_token_id A__ = pad_token_id A__ = bos_token_id def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A__ = tf.concat([input_ids, eos_tensor] , axis=1 ) A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A__ = prepare_blenderbot_small_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def UpperCamelCase ( self , lowercase , lowercase ) -> str: '''simple docstring''' A__ = TFBlenderbotSmallModel(config=lowercase ).get_decoder() A__ = inputs_dict["input_ids"] A__ = input_ids[:1, :] A__ = inputs_dict["attention_mask"][:1, :] A__ = inputs_dict["head_mask"] A__ = 1 # first forward pass A__ = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase ) A__ , A__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A__ = tf.concat([input_ids, next_tokens] , axis=-1 ) A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A__ = model(lowercase , attention_mask=lowercase )[0] A__ = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A__ = output_from_no_past[:, -3:, 
random_slice_idx] A__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1e-3 ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Optional[Any]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , SCREAMING_SNAKE_CASE_: Dict=None , SCREAMING_SNAKE_CASE_: List[str]=None , ) -> List[Any]: '''simple docstring''' if attention_mask is None: A__ = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class a__ ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) __lowerCamelCase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __lowerCamelCase = ( { 'conversational': TFBlenderbotSmallForConditionalGeneration, 'feature-extraction': TFBlenderbotSmallModel, 'summarization': TFBlenderbotSmallForConditionalGeneration, 'text2text-generation': TFBlenderbotSmallForConditionalGeneration, 'translation': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = False def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = TFBlenderbotSmallModelTester(self ) A__ = ConfigTester(self , config_class=lowercase ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) @require_tokenizers @require_tf class a__ ( unittest.TestCase ): """simple docstring""" __lowerCamelCase = [ 'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ' ' i\'m going to throw up.\nand why is that?' 
] __lowerCamelCase = 'facebook/blenderbot_small-90M' @cached_property def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) @cached_property def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.tokenizer(self.src_text , return_tensors="tf" ) A__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , ) A__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES _A = logging.get_logger(__name__) _A = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) _A = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) _A = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) _A = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", """FlaxMarianMTModel"""), ("""mbart""", 
"""FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) _A = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) _A = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) _A = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) _A = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) _A = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) _A = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) _A = OrderedDict( [ # Model for Multiple Choice mapping ("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", 
"""FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) _A = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) _A = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) _A = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) _A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) _A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) _A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) _A = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) _A = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) _A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) _A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) _A = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) _A = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) _A = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) _A = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) _A = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) _A = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) _A = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class _lowerCamelCase ( _BaseAutoModelClass ): _lowerCamelCase :List[str] = FLAX_MODEL_MAPPING _A = auto_class_update(FlaxAutoModel) class _lowerCamelCase ( _BaseAutoModelClass ): _lowerCamelCase :List[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING _A = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class _lowerCamelCase ( _BaseAutoModelClass ): _lowerCamelCase :List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING _A = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class _lowerCamelCase ( _BaseAutoModelClass ): _lowerCamelCase :Any = FLAX_MODEL_FOR_MASKED_LM_MAPPING _A = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class _lowerCamelCase ( _BaseAutoModelClass ): _lowerCamelCase :Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _A = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class _lowerCamelCase ( _BaseAutoModelClass ): _lowerCamelCase :Dict = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _A = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class _lowerCamelCase ( _BaseAutoModelClass ): _lowerCamelCase :Optional[int] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING _A = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class _lowerCamelCase ( _BaseAutoModelClass ): _lowerCamelCase :int = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING _A = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token 
classification""" ) class _lowerCamelCase ( _BaseAutoModelClass ): _lowerCamelCase :List[Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING _A = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class _lowerCamelCase ( _BaseAutoModelClass ): _lowerCamelCase :List[str] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING _A = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class _lowerCamelCase ( _BaseAutoModelClass ): _lowerCamelCase :Any = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING _A = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class _lowerCamelCase ( _BaseAutoModelClass ): _lowerCamelCase :Tuple = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING _A = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class _lowerCamelCase ( _BaseAutoModelClass ): _lowerCamelCase :int = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING _A = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
242
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase__ = logging.get_logger(__name__) class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = ['pixel_values'] def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ) -> None: '''simple docstring''' super().__init__(**lowercase ) A__ = size if size is not None else {"height": 384, "width": 384} A__ = get_size_dict(lowercase , default_to_square=lowercase ) A__ = do_resize A__ = size A__ = resample A__ = do_rescale A__ = rescale_factor A__ = do_normalize A__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A__ = image_std if image_std is not None else OPENAI_CLIP_STD A__ = do_convert_rgb def UpperCamelCase ( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ) -> np.ndarray: '''simple docstring''' A__ = get_size_dict(lowercase , default_to_square=lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' ) A__ = (size["height"], size["width"]) return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase ) def UpperCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> Optional[Any]: '''simple docstring''' return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray: '''simple docstring''' return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase ) def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> PIL.Image.Image: '''simple docstring''' A__ = do_resize if do_resize is not None else self.do_resize A__ = resample if resample is not None else self.resample A__ = do_rescale if do_rescale is not None else self.do_rescale A__ = rescale_factor if rescale_factor is not None else self.rescale_factor A__ = do_normalize if do_normalize is not None else self.do_normalize A__ = image_mean if image_mean is not None else self.image_mean A__ = image_std if image_std is not None else self.image_std A__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A__ = size if size is not None else self.size A__ = get_size_dict(lowercase , default_to_square=lowercase ) A__ = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: A__ = [convert_to_rgb(lowercase ) for image in images] # All transformations expect numpy arrays. A__ = [to_numpy_array(lowercase ) for image in images] if do_resize: A__ = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images] if do_rescale: A__ = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_normalize: A__ = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images] A__ = [to_channel_dimension_format(lowercase , lowercase ) for image in images] A__ = BatchFeature(data={"pixel_values": images} , tensor_type=lowercase ) return encoded_outputs
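The preprocess path above is resize, rescale, normalize, then channels-first layout. A standalone numpy/PIL sketch of the same transformation chain (illustrative input image; the CLIP mean/std values are the constants imported above):

import numpy as np
from PIL import Image

OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073]
OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]

image = Image.new("RGB", (500, 300))            # stand-in input image
image = image.resize((384, 384))                # do_resize (bicubic by default)
pixels = np.asarray(image).astype(np.float32)
pixels = pixels * (1 / 255)                     # do_rescale
pixels = (pixels - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD  # do_normalize
pixels = pixels.transpose(2, 0, 1)              # ChannelDimension.FIRST
print(pixels.shape)                             # (3, 384, 384)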
68
0
from functools import reduce _lowerCamelCase =( """73167176531330624919225119674426574742355349194934""" """96983520312774506326239578318016984801869478851843""" """85861560789112949495459501737958331952853208805511""" """12540698747158523863050715693290963295227443043557""" """66896648950445244523161731856403098711121722383113""" """62229893423380308135336276614282806444486645238749""" """30358907296290491560440772390713810515859307960866""" """70172427121883998797908792274921901699720888093776""" """65727333001053367881220235421809751254540594752243""" """52584907711670556013604839586446706324415722155397""" """53697817977846174064955149290862569321978468622482""" """83972241375657056057490261407972968652414535100474""" """82166370484403199890008895243450658541227588666881""" """16427171479924442928230863465674813919123162824586""" """17866458359124566529476545682848912883142607690042""" """24219022671055626321111109370544217506941658960408""" """07198403850962455444362981230987879927244284909188""" """84580156166097919133875499200524063689912560717606""" """05886116467109405077541002256983155200055935729725""" """71636269561882670428252483600823257530420752963450""" ) def _a ( lowerCamelCase = N ): return max( # mypy cannot properly interpret reduce int(reduce(lambda lowerCamelCase, lowerCamelCase : str(int(SCREAMING_SNAKE_CASE_ ) * int(SCREAMING_SNAKE_CASE_ ) ), n[i : i + 13] ) ) for i in range(len(SCREAMING_SNAKE_CASE_ ) - 12 ) ) if __name__ == "__main__": print(f'''{solution() = }''')
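The solver above binds its default argument to an undefined N, so it cannot run as written; a readable sketch of the same 13-digit sliding-window idea, with the digit string passed in explicitly (the function name here is illustrative):

from functools import reduce


def largest_adjacent_product(digits: str, span: int = 13) -> int:
    """Largest product of `span` adjacent digits in `digits` (Project Euler 8)."""
    return max(
        reduce(lambda a, b: a * b, (int(ch) for ch in digits[i : i + span]))
        for i in range(len(digits) - span + 1)
    )


print(largest_adjacent_product("12345", span=2))  # 20, from the window "45"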
287
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowerCAmelCase__ = """hf-internal-testing/tiny-random-bert""" lowerCAmelCase__ = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""") lowerCAmelCase__ = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6""" class a__ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = cached_file(lowercase , lowercase ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(lowercase ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(lowercase , lowercase ) ) ) with open(os.path.join(lowercase , "refs" , "main" ) ) as f: A__ = f.read() self.assertEqual(lowercase , os.path.join(lowercase , "snapshots" , lowercase , lowercase ) ) self.assertTrue(os.path.isfile(lowercase ) ) # File is cached at the same place the second time. A__ = cached_file(lowercase , lowercase ) self.assertEqual(lowercase , lowercase ) # Using a specific revision to test the full commit hash. A__ = cached_file(lowercase , lowercase , revision="9b8c223" ) self.assertEqual(lowercase , os.path.join(lowercase , "snapshots" , lowercase , lowercase ) ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' with self.assertRaisesRegex(lowercase , "is not a valid model identifier" ): A__ = cached_file("tiny-random-bert" , lowercase ) with self.assertRaisesRegex(lowercase , "is not a valid git identifier" ): A__ = cached_file(lowercase , lowercase , revision="aaaa" ) with self.assertRaisesRegex(lowercase , "does not appear to have a file named" ): A__ = cached_file(lowercase , "conf" ) def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' with self.assertRaisesRegex(lowercase , "does not appear to have a file named" ): A__ = cached_file(lowercase , "conf" ) with open(os.path.join(lowercase , "refs" , "main" ) ) as f: A__ = f.read() self.assertTrue(os.path.isfile(os.path.join(lowercase , ".no_exist" , lowercase , "conf" ) ) ) A__ = cached_file(lowercase , "conf" , _raise_exceptions_for_missing_entries=lowercase ) self.assertIsNone(lowercase ) A__ = cached_file(lowercase , "conf" , local_files_only=lowercase , _raise_exceptions_for_missing_entries=lowercase ) self.assertIsNone(lowercase ) A__ = mock.Mock() A__ = 500 A__ = {} A__ = HTTPError A__ = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" , return_value=lowercase ) as mock_head: A__ = cached_file(lowercase , "conf" , _raise_exceptions_for_connection_errors=lowercase ) self.assertIsNone(lowercase ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , lowercase ) ) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowercase ) ) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowercase ) ) def UpperCamelCase ( self ) -> str: '''simple docstring''' self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) ) # The function raises if the repository does not exist. 
with self.assertRaisesRegex(lowercase , "is not a valid model identifier" ): get_file_from_repo("bert-base-case" , lowercase ) # The function raises if the revision does not exist. with self.assertRaisesRegex(lowercase , "is not a valid git identifier" ): get_file_from_repo("bert-base-cased" , lowercase , revision="ahaha" ) A__ = get_file_from_repo("bert-base-cased" , lowercase ) # The name is the cached name which is not very easy to test, so instead we load the content. A__ = json.loads(open(lowercase , "r" ).read() ) self.assertEqual(config["hidden_size"] , 768 ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: A__ = Path(lowercase ) / "a.txt" filename.touch() self.assertEqual(get_file_from_repo(lowercase , "a.txt" ) , str(lowercase ) ) self.assertIsNone(get_file_from_repo(lowercase , "b.txt" ) )
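The happy path of the cache helper exercised above, as a sketch (the first call needs network access; later calls resolve from the local cache):

from transformers.utils import cached_file

resolved = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
# e.g. .../models--hf-internal-testing--tiny-random-bert/snapshots/<sha>/config.json
print(resolved)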
68
0
"""simple docstring""" import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def _snake_case ( _snake_case : Optional[int] , _snake_case : Union[str, Any]=7 ) -> str: '''simple docstring''' _A = None if token is not None: _A = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''} # The id of a workflow (not of a workflow run) _A = '636036' _A = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs''' # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}''' _A = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json() return result["workflow_runs"] def _snake_case ( _snake_case : Optional[Any] ) -> Optional[int]: '''simple docstring''' _A = get_daily_ci_runs(SCREAMING_SNAKE_CASE_ ) _A = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": _A = workflow_run['id'] break return workflow_run_id def _snake_case ( _snake_case : Tuple , _snake_case : List[str] , _snake_case : str ) -> List[str]: '''simple docstring''' _A = get_last_daily_ci_runs(SCREAMING_SNAKE_CASE_ ) if workflow_run_id is not None: _A = get_artifacts_links(worflow_run_id=SCREAMING_SNAKE_CASE_ , token=SCREAMING_SNAKE_CASE_ ) for artifact_name in artifact_names: if artifact_name in artifacts_links: _A = artifacts_links[artifact_name] download_artifact( artifact_name=SCREAMING_SNAKE_CASE_ , artifact_url=SCREAMING_SNAKE_CASE_ , output_dir=SCREAMING_SNAKE_CASE_ , token=SCREAMING_SNAKE_CASE_ ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Optional[int] , _snake_case : List[str] ) -> Optional[int]: '''simple docstring''' get_last_daily_ci_artifacts(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _A = {} for artifact_name in artifact_names: _A = os.path.join(SCREAMING_SNAKE_CASE_ , F'''{artifact_name}.zip''' ) if os.path.isfile(SCREAMING_SNAKE_CASE_ ): _A = {} with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ) as z: for filename in z.namelist(): if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): # read the file with z.open(SCREAMING_SNAKE_CASE_ ) as f: _A = f.read().decode('UTF-8' ) return results
315
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class a__ ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = AutoencoderKL __lowerCamelCase = 'sample' __lowerCamelCase = 1e-2 @property def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = 4 A__ = 3 A__ = (32, 32) A__ = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase ) return {"sample": image} @property def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' return (3, 32, 32) @property def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' return (3, 32, 32) def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } A__ = self.dummy_input return init_dict, inputs_dict def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' pass def UpperCamelCase ( self ) -> Any: '''simple docstring''' pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ , A__ = self.prepare_init_args_and_inputs_for_common() A__ = self.model_class(**lowercase ) model.to(lowercase ) assert not model.is_gradient_checkpointing and model.training A__ = model(**lowercase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() A__ = torch.randn_like(lowercase ) A__ = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing A__ = self.model_class(**lowercase ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(lowercase ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training A__ = model_a(**lowercase ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() A__ = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) A__ = dict(model.named_parameters() ) A__ = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ , A__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(lowercase ) A__ = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) A__ = model.to(lowercase ) model.eval() if torch_device == "mps": A__ = torch.manual_seed(0 ) else: A__ = torch.Generator(device=lowercase ).manual_seed(0 ) A__ = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) A__ = image.to(lowercase ) with torch.no_grad(): A__ = model(lowercase , sample_posterior=lowercase , generator=lowercase ).sample A__ = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": A__ = torch.tensor( [ -4.00_78e-01, -3.83_23e-04, -1.26_81e-01, -1.14_62e-01, 2.00_95e-01, 1.08_93e-01, -8.82_47e-02, -3.03_61e-01, -9.86_44e-03, ] ) elif torch_device == "cpu": A__ = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: A__ = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(lowercase , lowercase , rtol=1e-2 ) ) @slow class a__ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self , lowercase , lowercase ) -> str: '''simple docstring''' return F'gaussian_noise_s={seed}_shape={"_".join([str(lowercase ) for s in shape] )}.npy' def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self , lowercase=0 , lowercase=(4, 3, 512, 512) , lowercase=False ) -> Optional[int]: '''simple docstring''' A__ = torch.floataa if fpaa else torch.floataa A__ = torch.from_numpy(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) ).to(lowercase ).to(lowercase ) return image def UpperCamelCase ( self , lowercase="CompVis/stable-diffusion-v1-4" , lowercase=False ) -> Any: '''simple docstring''' A__ = "fp16" if fpaa else None A__ = torch.floataa if fpaa else torch.floataa A__ = AutoencoderKL.from_pretrained( lowercase , subfolder="vae" , torch_dtype=lowercase , revision=lowercase , ) model.to(lowercase ).eval() return model def UpperCamelCase ( self , lowercase=0 ) -> List[str]: '''simple docstring''' if torch_device == "mps": return torch.manual_seed(lowercase ) return torch.Generator(device=lowercase ).manual_seed(lowercase ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, 
-0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> int: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase ) A__ = self.get_generator(lowercase ) with torch.no_grad(): A__ = model(lowercase , generator=lowercase , sample_posterior=lowercase ).sample assert sample.shape == image.shape A__ = sample[-1, -2:, -2:, :2].flatten().float().cpu() A__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(lowercase , lowercase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase ( self , lowercase , lowercase ) -> List[Any]: '''simple docstring''' A__ = self.get_sd_vae_model(fpaa=lowercase ) A__ = self.get_sd_image(lowercase , fpaa=lowercase ) A__ = self.get_generator(lowercase ) with torch.no_grad(): A__ = model(lowercase , generator=lowercase , sample_posterior=lowercase ).sample assert sample.shape == image.shape A__ = sample[-1, -2:, :2, -2:].flatten().float().cpu() A__ = torch.tensor(lowercase ) assert torch_all_close(lowercase , lowercase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase ) with torch.no_grad(): A__ = model(lowercase ).sample assert sample.shape == image.shape A__ = sample[-1, -2:, -2:, :2].flatten().float().cpu() A__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(lowercase , lowercase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase ( self , lowercase , lowercase ) -> Tuple: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) ) with torch.no_grad(): A__ = model.decode(lowercase ).sample assert list(sample.shape ) == [3, 3, 512, 512] A__ = sample[-1, -2:, :2, -2:].flatten().cpu() A__ = torch.tensor(lowercase ) assert torch_all_close(lowercase , lowercase , atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = self.get_sd_vae_model(fpaa=lowercase ) A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) , fpaa=lowercase ) with torch.no_grad(): A__ = model.decode(lowercase ).sample assert list(sample.shape ) == [3, 3, 512, 512] A__ = sample[-1, -2:, :2, -2:].flatten().float().cpu() A__ = torch.tensor(lowercase ) assert torch_all_close(lowercase , lowercase , atol=5e-3 ) @parameterized.expand([(13,), 
(16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def UpperCamelCase ( self , lowercase ) -> Optional[Any]: '''simple docstring''' A__ = self.get_sd_vae_model(fpaa=lowercase ) A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) , fpaa=lowercase ) with torch.no_grad(): A__ = model.decode(lowercase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): A__ = model.decode(lowercase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowercase , lowercase , atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def UpperCamelCase ( self , lowercase ) -> List[str]: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) ) with torch.no_grad(): A__ = model.decode(lowercase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): A__ = model.decode(lowercase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowercase , lowercase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def UpperCamelCase ( self , lowercase , lowercase ) -> str: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase ) A__ = self.get_generator(lowercase ) with torch.no_grad(): A__ = model.encode(lowercase ).latent_dist A__ = dist.sample(generator=lowercase ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] A__ = sample[0, -1, -3:, -3:].flatten().cpu() A__ = torch.tensor(lowercase ) A__ = 3e-3 if torch_device != "mps" else 1e-2 assert torch_all_close(lowercase , lowercase , atol=lowercase )
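The encode/decode surface those tests exercise, as a small roundtrip sketch with a randomly initialized VAE (same tiny config as the test's init dict, so no checkpoint download is needed):

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL(
    in_channels=3,
    out_channels=3,
    block_out_channels=(32, 64),
    down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
    up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
    latent_channels=4,
)
image = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample()  # one downsampling block: 32 -> 16
    recon = vae.decode(latents).sample
print(latents.shape, recon.shape)  # torch.Size([1, 4, 16, 16]) torch.Size([1, 3, 32, 32])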
68
0
"""simple docstring""" from __future__ import annotations import time import numpy as np A : str = [8, 5, 9, 7] A : Optional[Any] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] A : List[str] = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class _UpperCamelCase : '''simple docstring''' def __init__( self , __a , __a , __a , ): __lowerCAmelCase = claim_vector __lowerCAmelCase = allocated_resources_table __lowerCAmelCase = maximum_claim_table def snake_case ( self ): return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def snake_case ( self ): return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def snake_case ( self ): return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__a ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def snake_case ( self ): return {self.__need().index(__a ): i for i in self.__need()} def snake_case ( self , **__a ): __lowerCAmelCase = self.__need() __lowerCAmelCase = self.__allocated_resources_table __lowerCAmelCase = self.__available_resources() __lowerCAmelCase = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n" ) while need_list: __lowerCAmelCase = False for each_need in need_list: __lowerCAmelCase = True for index, need in enumerate(__a ): if need > available_resources[index]: __lowerCAmelCase = False break if execution: __lowerCAmelCase = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: __lowerCAmelCase = original_need_index print(f"Process {process_number + 1} is executing." ) # remove the process run from stack need_list.remove(__a ) # update available/freed resources stack __lowerCAmelCase = np.array(__a ) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(__a ) for x in available_resources] ) ) break if safe: print("The process is in a safe state.\n" ) else: print("System in unsafe state. Aborting...\n" ) break def snake_case ( self ): print(" " * 9 + "Allocated Resource Table" ) for item in self.__allocated_resources_table: print( f"P{self.__allocated_resources_table.index(__a ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print(" " * 9 + "System Resource Table" ) for item in self.__maximum_claim_table: print( f"P{self.__maximum_claim_table.index(__a ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(__a ) for x in self.__claim_vector ) ) print( "Initial Available Resources: " + " ".join(str(__a ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
57
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask lowerCAmelCase__ = logging.getLogger(__name__) class a__ ( snake_case ): """simple docstring""" def __init__( self , lowercase=-1 ) -> Optional[Any]: '''simple docstring''' A__ = label_idx def UpperCamelCase ( self , lowercase , lowercase ) -> List[InputExample]: '''simple docstring''' if isinstance(lowercase , lowercase ): A__ = mode.value A__ = os.path.join(lowercase , F'{mode}.txt' ) A__ = 1 A__ = [] with open(lowercase , encoding="utf-8" ) as f: A__ = [] A__ = [] for line in f: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) ) guid_index += 1 A__ = [] A__ = [] else: A__ = line.split(" " ) words.append(splits[0] ) if len(lowercase ) > 1: labels.append(splits[self.label_idx].replace("\n" , "" ) ) else: # Examples could have no label for mode = "test" labels.append("O" ) if words: examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) ) return examples def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Optional[Any]: '''simple docstring''' A__ = 0 for line in test_input_reader: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": writer.write(lowercase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: A__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n" writer.write(lowercase ) else: logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] ) def UpperCamelCase ( self , lowercase ) -> List[str]: '''simple docstring''' if path: with open(lowercase , "r" ) as f: A__ = f.read().splitlines() if "O" not in labels: A__ = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class a__ ( snake_case ): """simple docstring""" def __init__( self ) -> Union[str, Any]: '''simple docstring''' super().__init__(label_idx=-2 ) def UpperCamelCase ( self , lowercase ) -> List[str]: '''simple docstring''' if path: with open(lowercase , "r" ) as f: A__ = f.read().splitlines() if "O" not in labels: A__ = ["O"] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class a__ ( snake_case ): """simple docstring""" def UpperCamelCase ( self , lowercase , lowercase ) -> List[InputExample]: '''simple docstring''' if isinstance(lowercase , lowercase ): A__ = mode.value A__ = os.path.join(lowercase , F'{mode}.txt' ) A__ = 1 A__ = [] with open(lowercase , encoding="utf-8" ) as f: for sentence in parse_incr(lowercase ): A__ = [] A__ = [] for token in sentence: words.append(token["form"] ) labels.append(token["upos"] ) assert len(lowercase ) == len(lowercase ) if words: examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) ) guid_index += 1 return examples def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' A__ = 0 for sentence in parse_incr(lowercase ): A__ = preds_list[example_id] A__ = "" for token in sentence: out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) ' out += "\n" writer.write(lowercase ) example_id += 1 def UpperCamelCase ( self , lowercase ) -> 
List[str]: '''simple docstring''' if path: with open(lowercase , "r" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
68
0
def max_product_subarray(numbers: list[int]) -> int:
    """Return the largest product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
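A quick check of the sign-flip handling (a negative element swaps the running max and min before they are extended):

print(max_product_subarray([2, 3, -2, 4]))  # 6, from the subarray [2, 3]
print(max_product_subarray([-2, 3, -4]))    # 24, from the whole array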
58
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt `text` with a fresh random key per character; return (cipher, key)."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert encrypt: p = (c - k^2) / k at each position."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
68
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __lowerCAmelCase = logging.get_logger(__name__) class _lowerCAmelCase ( __snake_case ): '''simple docstring''' lowerCAmelCase_ = ["pixel_values"] def __init__(self , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = PILImageResampling.BICUBIC , UpperCAmelCase = True , UpperCAmelCase = 1 / 255 , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = True , **UpperCAmelCase , ) -> None: super().__init__(**UpperCAmelCase ) _snake_case = size if size is not None else {"""height""": 384, """width""": 384} _snake_case = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) _snake_case = do_resize _snake_case = size _snake_case = resample _snake_case = do_rescale _snake_case = rescale_factor _snake_case = do_normalize _snake_case = image_mean if image_mean is not None else OPENAI_CLIP_MEAN _snake_case = image_std if image_std is not None else OPENAI_CLIP_STD _snake_case = do_convert_rgb def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = PILImageResampling.BICUBIC , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray: _snake_case = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}""" ) _snake_case = (size["""height"""], size["""width"""]) return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> Optional[Any]: return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray: return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def lowercase (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ) -> PIL.Image.Image: _snake_case = do_resize if do_resize is not None else self.do_resize _snake_case = resample if resample is not None else self.resample _snake_case = do_rescale if do_rescale is not None else self.do_rescale _snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor _snake_case = do_normalize if do_normalize is not None else self.do_normalize _snake_case = image_mean if image_mean is not None else self.image_mean _snake_case = image_std if image_std is not None else self.image_std _snake_case = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _snake_case = size if size is not None else self.size _snake_case = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) _snake_case = make_list_of_images(UpperCAmelCase ) if not valid_images(UpperCAmelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: _snake_case = [convert_to_rgb(UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. _snake_case = [to_numpy_array(UpperCAmelCase ) for image in images] if do_resize: _snake_case = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images] if do_rescale: _snake_case = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images] if do_normalize: _snake_case = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images] _snake_case = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images] _snake_case = BatchFeature(data={"""pixel_values""": images} , tensor_type=UpperCAmelCase ) return encoded_outputs
341
def triangle_number_generator():
    """Yield successive triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """First triangle number with more than 500 divisors (Project Euler 12)."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
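A sanity check of the divisor count: 28 = 2^2 * 7, so it has (2 + 1) * (1 + 1) = 6 divisors.

print(count_divisors(28))  # 6 -> divisors 1, 2, 4, 7, 14, 28
print(solution())          # 76576500, the known Project Euler 12 answer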
68
0
import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __lowerCAmelCase ( unittest.TestCase ): def A__ ( self ) -> List[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def A__ ( self ) -> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) _lowercase =UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , ) return model @property def A__ ( self ) -> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) _lowercase =UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , ) return model @property def A__ ( self ) -> List[Any]: '''simple docstring''' torch.manual_seed(0 ) _lowercase =AutoencoderKL( sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , ) _lowercase =UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , ) return vqvae, unet @slow def A__ ( self ) -> str: '''simple docstring''' _lowercase ='cpu' # ensure determinism for the device-dependent torch.Generator _lowercase =Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) _lowercase =DDPMScheduler() _lowercase =AudioDiffusionPipeline(vqvae=lowerCAmelCase , unet=self.dummy_unet , mel=lowerCAmelCase , scheduler=lowerCAmelCase ) _lowercase =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) _lowercase =torch.Generator(device=lowerCAmelCase ).manual_seed(42 ) _lowercase =pipe(generator=lowerCAmelCase , steps=4 ) _lowercase =output.audios[0] _lowercase =output.images[0] _lowercase =torch.Generator(device=lowerCAmelCase ).manual_seed(42 ) _lowercase =pipe(generator=lowerCAmelCase , steps=4 , return_dict=lowerCAmelCase ) _lowercase =output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) _lowercase =np.frombuffer(image.tobytes() , dtype='uint8' )[:10] _lowercase =np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10] _lowercase =np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 _lowercase =Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) _lowercase =DDIMScheduler() _lowercase =self.dummy_vqvae_and_unet _lowercase =AudioDiffusionPipeline( 
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowerCAmelCase , scheduler=lowerCAmelCase ) _lowercase =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) np.random.seed(0 ) _lowercase =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) _lowercase =torch.Generator(device=lowerCAmelCase ).manual_seed(42 ) _lowercase =pipe(raw_audio=lowerCAmelCase , generator=lowerCAmelCase , start_step=5 , steps=10 ) _lowercase =output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) _lowercase =np.frombuffer(image.tobytes() , dtype='uint8' )[:10] _lowercase =np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 _lowercase =self.dummy_unet_condition _lowercase =AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=lowerCAmelCase , mel=lowerCAmelCase , scheduler=lowerCAmelCase ) _lowercase =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) np.random.seed(0 ) _lowercase =torch.rand((1, 1, 10) ) _lowercase =pipe(generator=lowerCAmelCase , encoding=lowerCAmelCase ) _lowercase =output.images[0] _lowercase =np.frombuffer(image.tobytes() , dtype='uint8' )[:10] _lowercase =np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): def A__ ( self ) -> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self ) -> Optional[Any]: '''simple docstring''' _lowercase =torch_device _lowercase =DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' ) _lowercase =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) _lowercase =torch.Generator(device=lowerCAmelCase ).manual_seed(42 ) _lowercase =pipe(generator=lowerCAmelCase ) _lowercase =output.audios[0] _lowercase =output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] _lowercase =np.frombuffer(image.tobytes() , dtype='uint8' )[:10] _lowercase =np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
205
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu lowerCAmelCase__ = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json""" with io.open(filename, """r""", encoding="""utf-8""") as f: lowerCAmelCase__ = json.load(f) @require_torch class a__ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self , lowercase ) -> int: '''simple docstring''' return FSMTTokenizer.from_pretrained(lowercase ) def UpperCamelCase ( self , lowercase ) -> Optional[int]: '''simple docstring''' A__ = FSMTForConditionalGeneration.from_pretrained(lowercase ).to(lowercase ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["en-ru", 26.0], ["ru-en", 22.0], ["en-de", 22.0], ["de-en", 29.0], ] ) @slow def UpperCamelCase ( self , lowercase , lowercase ) -> List[Any]: '''simple docstring''' A__ = F'facebook/wmt19-{pair}' A__ = self.get_tokenizer(lowercase ) A__ = self.get_model(lowercase ) A__ = bleu_data[pair]["src"] A__ = bleu_data[pair]["tgt"] A__ = tokenizer(lowercase , return_tensors="pt" , truncation=lowercase , padding="longest" ).to(lowercase ) A__ = model.generate( input_ids=batch.input_ids , num_beams=8 , ) A__ = tokenizer.batch_decode( lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) A__ = calculate_bleu(lowercase , lowercase ) print(lowercase ) self.assertGreaterEqual(scores["bleu"] , lowercase )
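The translate step those BLEU tests drive end to end, as a minimal sketch (downloads the FSMT checkpoint on first use):

from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-en-ru"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

batch = tokenizer(["Machine learning is great."], return_tensors="pt")
outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))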
68
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A: Any = { "configuration_groupvit": [ "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GroupViTConfig", "GroupViTOnnxConfig", "GroupViTTextConfig", "GroupViTVisionConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A: List[str] = [ "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GroupViTModel", "GroupViTPreTrainedModel", "GroupViTTextModel", "GroupViTVisionModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A: Optional[int] = [ "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFGroupViTModel", "TFGroupViTPreTrainedModel", "TFGroupViTTextModel", "TFGroupViTVisionModel", ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys A: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
109
def hamming(n_element: int) -> list:
    """Return the first `n_element` Hamming numbers (numbers of the form 2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        raise ValueError("n_element should be a positive number")
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
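A quick check of the sequence it produces:

print(hamming(10))  # [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]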
68
0
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device lowerCamelCase__ : Union[str, Any] = False class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Any ): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : Any ): SCREAMING_SNAKE_CASE_ = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' ) # remove text_unet pipe.remove_unused_weights() pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger ' SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = pipe( prompt=_lowerCAmelCase , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = VersatileDiffusionTextToImagePipeline.from_pretrained(_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = generator.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = pipe( prompt=_lowerCAmelCase , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def lowerCAmelCase_ ( self : Optional[int] ): SCREAMING_SNAKE_CASE_ = VersatileDiffusionTextToImagePipeline.from_pretrained( 'shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger ' SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = pipe( prompt=_lowerCAmelCase , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images SCREAMING_SNAKE_CASE_ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
225
import copy import random from transformers import CLIPTokenizer class a__ ( snake_case ): """simple docstring""" def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]: '''simple docstring''' super().__init__(*lowercase , **lowercase ) A__ = {} def UpperCamelCase ( self , lowercase , *lowercase , **lowercase ) -> str: '''simple docstring''' A__ = super().add_tokens(lowercase , *lowercase , **lowercase ) if num_added_tokens == 0: raise ValueError( F'The tokenizer already contains the token {placeholder_token}. Please pass a different' " `placeholder_token` that is not already in the tokenizer." ) def UpperCamelCase ( self , lowercase , *lowercase , lowercase=1 , **lowercase ) -> Any: '''simple docstring''' A__ = [] if num_vec_per_token == 1: self.try_adding_tokens(lowercase , *lowercase , **lowercase ) output.append(lowercase ) else: A__ = [] for i in range(lowercase ): A__ = placeholder_token + F'_{i}' self.try_adding_tokens(lowercase , *lowercase , **lowercase ) output.append(lowercase ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( F'The tokenizer already has placeholder token {token} that can get confused with' F' {placeholder_token}keep placeholder tokens independent' ) A__ = output def UpperCamelCase ( self , lowercase , lowercase=False , lowercase=1.0 ) -> List[Any]: '''simple docstring''' if isinstance(lowercase , lowercase ): A__ = [] for i in range(len(lowercase ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowercase ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: A__ = self.token_map[placeholder_token] A__ = tokens[: 1 + int(len(lowercase ) * prop_tokens_to_load )] if vector_shuffle: A__ = copy.copy(lowercase ) random.shuffle(lowercase ) A__ = text.replace(lowercase , " ".join(lowercase ) ) return text def __call__( self , lowercase , *lowercase , lowercase=False , lowercase=1.0 , **lowercase ) -> str: '''simple docstring''' return super().__call__( self.replace_placeholder_tokens_in_text( lowercase , vector_shuffle=lowercase , prop_tokens_to_load=lowercase ) , *lowercase , **lowercase , ) def UpperCamelCase ( self , lowercase , *lowercase , lowercase=False , lowercase=1.0 , **lowercase ) -> List[str]: '''simple docstring''' return super().encode( self.replace_placeholder_tokens_in_text( lowercase , vector_shuffle=lowercase , prop_tokens_to_load=lowercase ) , *lowercase , **lowercase , )
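The substitution step above expands one placeholder into its per-vector tokens before tokenization, the trick used for multi-vector textual inversion. A standalone sketch with a hypothetical placeholder name:

token_map = {"<cat>": ["<cat>_0", "<cat>_1", "<cat>_2"]}


def expand_placeholders(text: str) -> str:
    # mirrors replace_placeholder_tokens_in_text without shuffling/truncation
    for placeholder, vectors in token_map.items():
        if placeholder in text:
            text = text.replace(placeholder, " ".join(vectors))
    return text


print(expand_placeholders("a photo of <cat> on a sofa"))
# a photo of <cat>_0 <cat>_1 <cat>_2 on a sofa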
68
0
"""simple docstring""" from __future__ import annotations from typing import Any def lowercase ( _SCREAMING_SNAKE_CASE : list[Any] ): '''simple docstring''' create_state_space_tree(SCREAMING_SNAKE_CASE_ , [] , 0 ) def lowercase ( _SCREAMING_SNAKE_CASE : list[Any] , _SCREAMING_SNAKE_CASE : list[Any] , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if index == len(SCREAMING_SNAKE_CASE_ ): print(SCREAMING_SNAKE_CASE_ ) return create_state_space_tree(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index + 1 ) current_subsequence.append(sequence[index] ) create_state_space_tree(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index + 1 ) current_subsequence.pop() if __name__ == "__main__": __A : Tuple = [3, 1, 2, 4] generate_all_subsequences(seq) seq.clear() seq.extend(["A", "B", "C"]) generate_all_subsequences(seq)
260
from collections import deque from math import floor from random import random from time import time class a__ : """simple docstring""" def __init__( self ) -> Dict: '''simple docstring''' A__ = {} def UpperCamelCase ( self , lowercase , lowercase , lowercase=1 ) -> Tuple: '''simple docstring''' if self.graph.get(lowercase ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: A__ = [[w, v]] if not self.graph.get(lowercase ): A__ = [] def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' return list(self.graph ) def UpperCamelCase ( self , lowercase , lowercase ) -> int: '''simple docstring''' if self.graph.get(lowercase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowercase ) def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Any: '''simple docstring''' if s == d: return [] A__ = [] A__ = [] if s == -2: A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowercase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return visited def UpperCamelCase ( self , lowercase=-1 ) -> Optional[Any]: '''simple docstring''' if c == -1: A__ = floor(random() * 10000 ) + 10 for i in range(lowercase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A__ = floor(random() * c ) + 1 if n != i: self.add_pair(lowercase , lowercase , 1 ) def UpperCamelCase ( self , lowercase=-2 ) -> Any: '''simple docstring''' A__ = deque() A__ = [] if s == -2: A__ = list(self.graph )[0] d.append(lowercase ) visited.append(lowercase ) while d: A__ = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def UpperCamelCase ( self , lowercase ) -> Tuple: '''simple docstring''' A__ = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def UpperCamelCase ( self , lowercase ) -> int: '''simple docstring''' return len(self.graph[u] ) def UpperCamelCase ( self , lowercase=-2 ) -> str: '''simple docstring''' A__ = [] A__ = [] if s == -2: A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = s A__ = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return sorted_nodes def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = [] A__ = [] A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = -2 A__ = [] A__ = s A__ = False A__ = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A__ = 
len(lowercase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() A__ = True if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = False indirect_parents.append(lowercase ) A__ = s A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return list(lowercase ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = [] A__ = [] A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = -2 A__ = [] A__ = s A__ = False A__ = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A__ = len(lowercase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() A__ = True if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = False indirect_parents.append(lowercase ) A__ = s A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return False def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Any: '''simple docstring''' A__ = time() self.dfs(lowercase , lowercase ) A__ = time() return end - begin def UpperCamelCase ( self , lowercase=-2 ) -> int: '''simple docstring''' A__ = time() self.bfs(lowercase ) A__ = time() return end - begin class a__ : """simple docstring""" def __init__( self ) -> int: '''simple docstring''' A__ = {} def UpperCamelCase ( self , lowercase , lowercase , lowercase=1 ) -> Union[str, Any]: '''simple docstring''' if self.graph.get(lowercase ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist A__ = [[w, v]] # add the other way if self.graph.get(lowercase ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist A__ = [[w, u]] def UpperCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' if self.graph.get(lowercase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowercase ) # the other way round if self.graph.get(lowercase ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(lowercase ) def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> List[str]: '''simple docstring''' if s == d: return [] A__ = [] A__ = [] if s == -2: A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowercase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return visited def UpperCamelCase ( self , lowercase=-1 ) -> 
str: '''simple docstring''' if c == -1: A__ = floor(random() * 10000 ) + 10 for i in range(lowercase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A__ = floor(random() * c ) + 1 if n != i: self.add_pair(lowercase , lowercase , 1 ) def UpperCamelCase ( self , lowercase=-2 ) -> Dict: '''simple docstring''' A__ = deque() A__ = [] if s == -2: A__ = list(self.graph )[0] d.append(lowercase ) visited.append(lowercase ) while d: A__ = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def UpperCamelCase ( self , lowercase ) -> Tuple: '''simple docstring''' return len(self.graph[u] ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' A__ = [] A__ = [] A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = -2 A__ = [] A__ = s A__ = False A__ = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A__ = len(lowercase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() A__ = True if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = False indirect_parents.append(lowercase ) A__ = s A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return list(lowercase ) def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = [] A__ = [] A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = -2 A__ = [] A__ = s A__ = False A__ = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A__ = len(lowercase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() A__ = True if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = False indirect_parents.append(lowercase ) A__ = s A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return False def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' return list(self.graph ) def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Optional[Any]: '''simple docstring''' A__ = time() self.dfs(lowercase , lowercase ) A__ = time() return end - begin def UpperCamelCase ( self , lowercase=-2 ) -> List[Any]: '''simple docstring''' A__ = time() self.bfs(lowercase ) A__ = time() return end - begin
68
0
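# The graph classes above traverse iteratively: DFS with an explicit stack, BFS with a
# deque. A minimal self-contained sketch of both traversals, assuming a plain
# dict-of-lists adjacency map rather than the classes' [weight, node] edge pairs:
from collections import deque


def dfs(graph: dict[int, list[int]], start: int) -> list[int]:
    """Iterative depth-first traversal using an explicit stack."""
    visited: list[int] = []
    stack = [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.append(node)
            # Push neighbours in reverse so they pop in their original order.
            stack.extend(reversed(graph.get(node, [])))
    return visited


def bfs(graph: dict[int, list[int]], start: int) -> list[int]:
    """Breadth-first traversal using a FIFO queue."""
    visited = [start]
    queue = deque([start])
    while queue:
        node = queue.popleft()
        for neighbour in graph.get(node, []):
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)
    return visited


g = {0: [1, 2], 1: [3], 2: [3], 3: []}
assert dfs(g, 0) == [0, 1, 3, 2]
assert bfs(g, 0) == [0, 1, 2, 3]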
'''simple docstring''' import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class a__ ( UpperCAmelCase__ ): def __get__( self : Optional[Any] , a : str , a : Optional[int]=None ): """simple docstring""" if obj is None: return self if self.fget is None: raise AttributeError('''unreadable attribute''' ) __lowerCamelCase = '''__cached_''' + self.fget.__name__ __lowerCamelCase = getattr(a , a , a ) if cached is None: __lowerCamelCase = self.fget(a ) setattr(a , a , a ) return cached def __lowerCAmelCase ( UpperCamelCase__ ) -> Any: __lowerCamelCase = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f"""invalid truth value {val!r}""" ) def __lowerCAmelCase ( UpperCamelCase__ ) -> List[Any]: if is_torch_fx_proxy(SCREAMING_SNAKE_CASE_ ): return True if is_torch_available(): import torch if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(SCREAMING_SNAKE_CASE_ , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(SCREAMING_SNAKE_CASE_ , (jnp.ndarray, Tracer) ): return True return isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) def __lowerCAmelCase ( UpperCamelCase__ ) -> Tuple: return isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) def __lowerCAmelCase ( UpperCamelCase__ ) -> Optional[Any]: return _is_numpy(SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( UpperCamelCase__ ) -> Optional[Any]: import torch return isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) def __lowerCAmelCase ( UpperCamelCase__ ) -> Any: return False if not is_torch_available() else _is_torch(SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( UpperCamelCase__ ) -> str: import torch return isinstance(SCREAMING_SNAKE_CASE_ , torch.device ) def __lowerCAmelCase ( UpperCamelCase__ ) -> Tuple: return False if not is_torch_available() else _is_torch_device(SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( UpperCamelCase__ ) -> Tuple: import torch if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else: return False return isinstance(SCREAMING_SNAKE_CASE_ , torch.dtype ) def __lowerCAmelCase ( UpperCamelCase__ ) -> Optional[Any]: return False if not is_torch_available() else _is_torch_dtype(SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( UpperCamelCase__ ) -> Optional[Any]: import tensorflow as tf return isinstance(SCREAMING_SNAKE_CASE_ , tf.Tensor ) def __lowerCAmelCase ( UpperCamelCase__ ) -> Union[str, Any]: return False if not is_tf_available() else _is_tensorflow(SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( UpperCamelCase__ ) -> Union[str, Any]: import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(SCREAMING_SNAKE_CASE_ , '''is_symbolic_tensor''' ): return tf.is_symbolic_tensor(SCREAMING_SNAKE_CASE_ ) return type(SCREAMING_SNAKE_CASE_ ) == tf.Tensor def __lowerCAmelCase ( UpperCamelCase__ ) -> int: return False if not 
is_tf_available() else _is_tf_symbolic_tensor(SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( UpperCamelCase__ ) -> str: import jax.numpy as jnp # noqa: F811 return isinstance(SCREAMING_SNAKE_CASE_ , jnp.ndarray ) def __lowerCAmelCase ( UpperCamelCase__ ) -> Any: return False if not is_flax_available() else _is_jax(SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( UpperCamelCase__ ) -> str: if isinstance(SCREAMING_SNAKE_CASE_ , (dict, UserDict) ): return {k: to_py_obj(SCREAMING_SNAKE_CASE_ ) for k, v in obj.items()} elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ): return [to_py_obj(SCREAMING_SNAKE_CASE_ ) for o in obj] elif is_tf_tensor(SCREAMING_SNAKE_CASE_ ): return obj.numpy().tolist() elif is_torch_tensor(SCREAMING_SNAKE_CASE_ ): return obj.detach().cpu().tolist() elif is_jax_tensor(SCREAMING_SNAKE_CASE_ ): return np.asarray(SCREAMING_SNAKE_CASE_ ).tolist() elif isinstance(SCREAMING_SNAKE_CASE_ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def __lowerCAmelCase ( UpperCamelCase__ ) -> Union[str, Any]: if isinstance(SCREAMING_SNAKE_CASE_ , (dict, UserDict) ): return {k: to_numpy(SCREAMING_SNAKE_CASE_ ) for k, v in obj.items()} elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ): return np.array(SCREAMING_SNAKE_CASE_ ) elif is_tf_tensor(SCREAMING_SNAKE_CASE_ ): return obj.numpy() elif is_torch_tensor(SCREAMING_SNAKE_CASE_ ): return obj.detach().cpu().numpy() elif is_jax_tensor(SCREAMING_SNAKE_CASE_ ): return np.asarray(SCREAMING_SNAKE_CASE_ ) else: return obj class a__ ( UpperCAmelCase__ ): def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" __lowerCamelCase = fields(self ) # Safety and consistency checks if not len(a ): raise ValueError(f"""{self.__class__.__name__} has no fields.""" ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" ) __lowerCamelCase = getattr(self , class_fields[0].name ) __lowerCamelCase = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(a ): if isinstance(a , a ): __lowerCamelCase = first_field.items() __lowerCamelCase = True else: try: __lowerCamelCase = iter(a ) __lowerCamelCase = True except TypeError: __lowerCamelCase = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(a ): if ( not isinstance(a , (list, tuple) ) or not len(a ) == 2 or not isinstance(element[0] , a ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute __lowerCamelCase = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f"""Cannot set key/value for {element}. 
It needs to be a tuple (key, value).""" ) break setattr(self , element[0] , element[1] ) if element[1] is not None: __lowerCamelCase = element[1] elif first_field is not None: __lowerCamelCase = first_field else: for field in class_fields: __lowerCamelCase = getattr(self , field.name ) if v is not None: __lowerCamelCase = v def __delitem__( self : List[Any] , *a : str , **a : Optional[Any] ): """simple docstring""" raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , *a : Union[str, Any] , **a : Optional[int] ): """simple docstring""" raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict , *a : int , **a : Dict ): """simple docstring""" raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" ) def SCREAMING_SNAKE_CASE__ ( self : Any , *a : str , **a : str ): """simple docstring""" raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" ) def __getitem__( self : Dict , a : Optional[int] ): """simple docstring""" if isinstance(a , a ): __lowerCamelCase = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : List[str] , a : Dict , a : Optional[Any] ): """simple docstring""" if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(a , a ) super().__setattr__(a , a ) def __setitem__( self : Any , a : Optional[int] , a : int ): """simple docstring""" super().__setitem__(a , a ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(a , a ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" return tuple(self[k] for k in self.keys() ) class a__ ( UpperCAmelCase__ , UpperCAmelCase__ ): @classmethod def SCREAMING_SNAKE_CASE__ ( cls : int , a : Tuple ): """simple docstring""" raise ValueError( f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" ) class a__ ( UpperCAmelCase__ ): lowerCamelCase : Optional[int] ="longest" lowerCamelCase : str ="max_length" lowerCamelCase : List[str] ="do_not_pad" class a__ ( UpperCAmelCase__ ): lowerCamelCase : Dict ="pt" lowerCamelCase : Optional[Any] ="tf" lowerCamelCase : List[Any] ="np" lowerCamelCase : int ="jax" class a__ : def __init__( self : Dict , a : Optional[int] ): """simple docstring""" __lowerCamelCase = context_managers __lowerCamelCase = ExitStack() def __enter__( self : Union[str, Any] ): """simple docstring""" for context_manager in self.context_managers: self.stack.enter_context(a ) def __exit__( self : int , *a : Optional[int] , **a : Optional[int] ): """simple docstring""" self.stack.__exit__(*a , **a ) def __lowerCAmelCase ( UpperCamelCase__ ) -> List[Any]: __lowerCamelCase = infer_framework(SCREAMING_SNAKE_CASE_ ) if framework == "tf": __lowerCamelCase = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": __lowerCamelCase = inspect.signature(model_class.forward ) # PyTorch models else: __lowerCamelCase = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def __lowerCAmelCase ( UpperCamelCase__ ) -> str: __lowerCamelCase = model_class.__name__ __lowerCamelCase = infer_framework(SCREAMING_SNAKE_CASE_ ) if framework == "tf": __lowerCamelCase = 
inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": __lowerCamelCase = inspect.signature(model_class.forward ) # PyTorch models else: __lowerCamelCase = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = "" , UpperCamelCase__ = "." ) -> Union[str, Any]: def _flatten_dict(UpperCamelCase__ , UpperCamelCase__="" , UpperCamelCase__="." ): for k, v in d.items(): __lowerCamelCase = str(SCREAMING_SNAKE_CASE_ ) + delimiter + str(SCREAMING_SNAKE_CASE_ ) if parent_key else k if v and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): yield from flatten_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , delimiter=SCREAMING_SNAKE_CASE_ ).items() else: yield key, v return dict(_flatten_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) @contextmanager def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = False ) -> Dict: if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__=None ) -> List[Any]: if is_numpy_array(SCREAMING_SNAKE_CASE_ ): return np.transpose(SCREAMING_SNAKE_CASE_ , axes=SCREAMING_SNAKE_CASE_ ) elif is_torch_tensor(SCREAMING_SNAKE_CASE_ ): return array.T if axes is None else array.permute(*SCREAMING_SNAKE_CASE_ ) elif is_tf_tensor(SCREAMING_SNAKE_CASE_ ): import tensorflow as tf return tf.transpose(SCREAMING_SNAKE_CASE_ , perm=SCREAMING_SNAKE_CASE_ ) elif is_jax_tensor(SCREAMING_SNAKE_CASE_ ): return jnp.transpose(SCREAMING_SNAKE_CASE_ , axes=SCREAMING_SNAKE_CASE_ ) else: raise ValueError(f"""Type not supported for transpose: {type(SCREAMING_SNAKE_CASE_ )}.""" ) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> int: if is_numpy_array(SCREAMING_SNAKE_CASE_ ): return np.reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif is_torch_tensor(SCREAMING_SNAKE_CASE_ ): return array.reshape(*SCREAMING_SNAKE_CASE_ ) elif is_tf_tensor(SCREAMING_SNAKE_CASE_ ): import tensorflow as tf return tf.reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif is_jax_tensor(SCREAMING_SNAKE_CASE_ ): return jnp.reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else: raise ValueError(f"""Type not supported for reshape: {type(SCREAMING_SNAKE_CASE_ )}.""" ) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__=None ) -> str: if is_numpy_array(SCREAMING_SNAKE_CASE_ ): return np.squeeze(SCREAMING_SNAKE_CASE_ , axis=SCREAMING_SNAKE_CASE_ ) elif is_torch_tensor(SCREAMING_SNAKE_CASE_ ): return array.squeeze() if axis is None else array.squeeze(dim=SCREAMING_SNAKE_CASE_ ) elif is_tf_tensor(SCREAMING_SNAKE_CASE_ ): import tensorflow as tf return tf.squeeze(SCREAMING_SNAKE_CASE_ , axis=SCREAMING_SNAKE_CASE_ ) elif is_jax_tensor(SCREAMING_SNAKE_CASE_ ): return jnp.squeeze(SCREAMING_SNAKE_CASE_ , axis=SCREAMING_SNAKE_CASE_ ) else: raise ValueError(f"""Type not supported for squeeze: {type(SCREAMING_SNAKE_CASE_ )}.""" ) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]: if is_numpy_array(SCREAMING_SNAKE_CASE_ ): return np.expand_dims(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif is_torch_tensor(SCREAMING_SNAKE_CASE_ ): return array.unsqueeze(dim=SCREAMING_SNAKE_CASE_ ) elif is_tf_tensor(SCREAMING_SNAKE_CASE_ ): 
import tensorflow as tf return tf.expand_dims(SCREAMING_SNAKE_CASE_ , axis=SCREAMING_SNAKE_CASE_ ) elif is_jax_tensor(SCREAMING_SNAKE_CASE_ ): return jnp.expand_dims(SCREAMING_SNAKE_CASE_ , axis=SCREAMING_SNAKE_CASE_ ) else: raise ValueError(f"""Type not supported for expand_dims: {type(SCREAMING_SNAKE_CASE_ )}.""" ) def __lowerCAmelCase ( UpperCamelCase__ ) -> Tuple: if is_numpy_array(SCREAMING_SNAKE_CASE_ ): return np.size(SCREAMING_SNAKE_CASE_ ) elif is_torch_tensor(SCREAMING_SNAKE_CASE_ ): return array.numel() elif is_tf_tensor(SCREAMING_SNAKE_CASE_ ): import tensorflow as tf return tf.size(SCREAMING_SNAKE_CASE_ ) elif is_jax_tensor(SCREAMING_SNAKE_CASE_ ): return array.size else: raise ValueError(f"""Type not supported for expand_dims: {type(SCREAMING_SNAKE_CASE_ )}.""" ) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict: for key, value in auto_map.items(): if isinstance(SCREAMING_SNAKE_CASE_ , (tuple, list) ): __lowerCamelCase = [f"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value] elif value is not None and "--" not in value: __lowerCamelCase = f"""{repo_id}--{value}""" return auto_map def __lowerCAmelCase ( UpperCamelCase__ ) -> Union[str, Any]: for base_class in inspect.getmro(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase = base_class.__module__ __lowerCamelCase = base_class.__name__ if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('''torch''' ) or name == "PreTrainedModel": return "pt" elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(f"""Could not infer framework from class {model_class}.""" )
67
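# The descriptor at the top of the block above memoises a property's first computed
# value on the instance under a "__cached_<name>" attribute. A compact standalone
# version of that cached-property idea (a sketch, not the transformers class itself):
class cached_property:
    """Descriptor that computes a value once, then caches it on the instance."""

    def __init__(self, fget):
        self.fget = fget
        self.attr = "__cached_" + fget.__name__

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        cached = getattr(obj, self.attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, self.attr, cached)
        return cached


class Model:
    calls = 0

    @cached_property
    def weights(self):
        Model.calls += 1
        return [0.0] * 3


m = Model()
_ = m.weights
_ = m.weights
assert Model.calls == 1  # fget ran only once; the second access hit the cache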
import datasets from .evaluate import evaluate lowerCAmelCase__ = """\ @article{hendrycks2021cuad, title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review}, author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball}, journal={arXiv preprint arXiv:2103.06268}, year={2021} } """ lowerCAmelCase__ = """ This metric wrap the official scoring script for version 1 of the Contract Understanding Atticus Dataset (CUAD). Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for when reviewing contracts in connection with corporate transactions. """ lowerCAmelCase__ = """ Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall). Args: predictions: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair as given in the references (see below) - 'prediction_text': list of possible texts for the answer, as a list of strings depending on a threshold on the confidence probability of each prediction. references: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair (see above), - 'answers': a Dict in the CUAD dataset format { 'text': list of possible texts for the answer, as a list of strings 'answer_start': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: 'exact_match': Exact match (the normalized answer exactly match the gold answer) 'f1': The F-score of predicted tokens versus the gold answer 'aupr': Area Under the Precision-Recall curve 'prec_at_80_recall': Precision at 80% recall 'prec_at_90_recall': Precision at 90% recall Examples: >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}] >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}] >>> cuad_metric = datasets.load_metric(\"cuad\") >>> results = cuad_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): """simple docstring""" def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": { "id": datasets.Value("string" ), "prediction_text": datasets.features.Sequence(datasets.Value("string" ) ), }, "references": { "id": datasets.Value("string" ), "answers": datasets.features.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), }, } ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , ) def UpperCamelCase ( self , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' A__ = {prediction["id"]: prediction["prediction_text"] for prediction in predictions} A__ 
= [ { "paragraphs": [ { "qas": [ { "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]], "id": ref["id"], } for ref in references ] } ] } ] A__ = evaluate(dataset=lowercase , predictions=lowercase ) return score
68
0
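# flatten_dict in the block above collapses nested mappings into delimiter-joined
# keys. A minimal dict-building version of the same pattern (the original is
# generator-based; behaviour is assumed equivalent for plain nested dicts):
def flatten_dict(d: dict, parent_key: str = "", delimiter: str = ".") -> dict:
    """Flatten {'a': {'b': 1}} into {'a.b': 1}."""
    items: dict = {}
    for k, v in d.items():
        key = f"{parent_key}{delimiter}{k}" if parent_key else str(k)
        if isinstance(v, dict) and v:
            items.update(flatten_dict(v, key, delimiter))
        else:
            items[key] = v
    return items


assert flatten_dict({"a": {"b": 1, "c": {"d": 2}}, "e": 3}) == {"a.b": 1, "a.c.d": 2, "e": 3}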
"""simple docstring""" import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures _A = logging.get_logger(__name__) @dataclass class _lowerCamelCase : _lowerCamelCase :Dict = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} ) _lowerCamelCase :Any = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) _lowerCamelCase :Dict = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) _lowerCamelCase :Optional[Any] = field( default=a_ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def _lowerCAmelCase ( self : int ) -> Dict: """simple docstring""" lowerCAmelCase__ : Tuple = self.task_name.lower() class _lowerCamelCase ( a_ ): _lowerCamelCase :Optional[Any] = "train" _lowerCamelCase :int = "dev" _lowerCamelCase :int = "test" class _lowerCamelCase ( a_ ): _lowerCamelCase :Optional[Any] = 42 _lowerCamelCase :List[Any] = 42 _lowerCamelCase :Any = 42 def __init__( self : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : Tuple , UpperCamelCase : Dict = None , UpperCamelCase : str = Split.train , UpperCamelCase : Union[str, Any] = None , ) -> int: """simple docstring""" warnings.warn( """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """ """library. You can have a look at this example script for pointers: """ """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , UpperCamelCase , ) lowerCAmelCase__ : Tuple = args lowerCAmelCase__ : Optional[int] = glue_processors[args.task_name]() lowerCAmelCase__ : Any = glue_output_modes[args.task_name] if isinstance(UpperCamelCase , UpperCamelCase ): try: lowerCAmelCase__ : Optional[Any] = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) # Load data features from cache or dataset file lowerCAmelCase__ : Union[str, Any] = os.path.join( cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , ) lowerCAmelCase__ : Optional[int] = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = label_list[2], label_list[1] lowerCAmelCase__ : Tuple = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
lowerCAmelCase__ : Union[str, Any] = cached_features_file + """.lock""" with FileLock(UpperCamelCase ): if os.path.exists(UpperCamelCase ) and not args.overwrite_cache: lowerCAmelCase__ : Any = time.time() lowerCAmelCase__ : List[str] = torch.load(UpperCamelCase ) logger.info( f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start ) else: logger.info(f"""Creating features from dataset file at {args.data_dir}""" ) if mode == Split.dev: lowerCAmelCase__ : str = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: lowerCAmelCase__ : Any = self.processor.get_test_examples(args.data_dir ) else: lowerCAmelCase__ : int = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: lowerCAmelCase__ : Optional[Any] = examples[:limit_length] lowerCAmelCase__ : Optional[int] = glue_convert_examples_to_features( UpperCamelCase , UpperCamelCase , max_length=args.max_seq_length , label_list=UpperCamelCase , output_mode=self.output_mode , ) lowerCAmelCase__ : Optional[int] = time.time() torch.save(self.features , UpperCamelCase ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" ) def __len__( self : str ) -> Union[str, Any]: """simple docstring""" return len(self.features ) def __getitem__( self : List[Any] , UpperCamelCase : Union[str, Any] ) -> InputFeatures: """simple docstring""" return self.features[i] def _lowerCAmelCase ( self : Any ) -> int: """simple docstring""" return self.label_list
242
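# The dataset class above wraps its cache load/build in a FileLock so that, under
# distributed training, one process builds the feature cache while the others block
# and then read it. A minimal sketch of that pattern (the cache path is a placeholder
# and the "feature building" is a stand-in computation):
import os
import pickle

from filelock import FileLock

cache_file = "cached_features.pkl"  # placeholder path

with FileLock(cache_file + ".lock"):
    if os.path.exists(cache_file):
        with open(cache_file, "rb") as f:
            features = pickle.load(f)  # another process already built the cache
    else:
        features = [i * i for i in range(10)]  # stand-in for real feature building
        with open(cache_file, "wb") as f:
            pickle.dump(features, f)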
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: int ) -> int: '''simple docstring''' A__ = nn.functional.normalize(SCREAMING_SNAKE_CASE_ ) A__ = nn.functional.normalize(SCREAMING_SNAKE_CASE_ ) return torch.mm(SCREAMING_SNAKE_CASE_ , normalized_text_embeds.t() ) class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = CLIPConfig __lowerCamelCase = ['CLIPEncoderLayer'] def __init__( self , lowercase ) -> Optional[int]: '''simple docstring''' super().__init__(lowercase ) A__ = CLIPVisionModel(config.vision_config ) A__ = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowercase ) A__ = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowercase ) A__ = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowercase ) A__ = nn.Parameter(torch.ones(17 ) , requires_grad=lowercase ) A__ = nn.Parameter(torch.ones(3 ) , requires_grad=lowercase ) @torch.no_grad() def UpperCamelCase ( self , lowercase , lowercase ) -> Any: '''simple docstring''' A__ = self.vision_model(lowercase )[1] # pooled_output A__ = self.visual_projection(lowercase ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A__ = cosine_distance(lowercase , self.special_care_embeds ).cpu().float().numpy() A__ = cosine_distance(lowercase , self.concept_embeds ).cpu().float().numpy() A__ = [] A__ = image_embeds.shape[0] for i in range(lowercase ): A__ = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images A__ = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): A__ = special_cos_dist[i][concept_idx] A__ = self.special_care_embeds_weights[concept_idx].item() A__ = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} ) A__ = 0.01 for concept_idx in range(len(cos_dist[0] ) ): A__ = cos_dist[i][concept_idx] A__ = self.concept_embeds_weights[concept_idx].item() A__ = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(lowercase ) result.append(lowercase ) A__ = [len(res["bad_concepts"] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def UpperCamelCase ( self , lowercase , lowercase ) -> Any: '''simple docstring''' A__ = self.vision_model(lowercase )[1] # pooled_output A__ = self.visual_projection(lowercase ) A__ = cosine_distance(lowercase , self.special_care_embeds ) A__ = cosine_distance(lowercase , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images A__ = 0.0 A__ = special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) A__ = torch.any(special_scores > 0 , dim=1 ) A__ = special_care * 0.01 A__ = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) A__ = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) A__ = torch.any(concept_scores > 0 , dim=1 ) return 
images, has_nsfw_concepts
68
0
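# cosine_distance in the safety checker above L2-normalises both embedding matrices
# and takes one matrix product, so entry (i, j) is the cosine similarity between image
# embedding i and concept embedding j. The same computation in plain NumPy, as a sketch:
import numpy as np


def cosine_similarity_matrix(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Rows of `a` against rows of `b`: normalise each row, then one matmul."""
    a = a / np.linalg.norm(a, axis=1, keepdims=True)
    b = b / np.linalg.norm(b, axis=1, keepdims=True)
    return a @ b.T


image_embeds = np.random.randn(2, 8)
concept_embeds = np.random.randn(3, 8)
assert cosine_similarity_matrix(image_embeds, concept_embeds).shape == (2, 3)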
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class TestTPULaunch(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        # Path to the script that the spawned TPU processes will execute.
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"]
        )
        self.test_dir = os.path.sep.join(
            inspect.getfile(self.__class__).split(os.path.sep)[:-1]
        )

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
287
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
68
0
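# The TPU test above builds an argv list for xla_spawn.py and hands it to
# execute_subprocess_async. The same launch shape with the standard library; the
# launcher and script paths here are placeholders, not real files, so the command is
# only printed rather than run:
import os
import subprocess
import sys

launcher = "xla_spawn.py"   # placeholder
script = "test_script.py"   # placeholder

cmd = [sys.executable, launcher, "--num_cores", "8", script]
print(cmd)
# In the real test this is executed with the environment forwarded, e.g.:
# subprocess.run(cmd, env=os.environ.copy(), check=True)  # raises on non-zero exit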
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a = { '''configuration_instructblip''': [ '''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InstructBlipConfig''', '''InstructBlipQFormerConfig''', '''InstructBlipVisionConfig''', ], '''processing_instructblip''': ['''InstructBlipProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InstructBlipQFormerModel''', '''InstructBlipPreTrainedModel''', '''InstructBlipForConditionalGeneration''', '''InstructBlipVisionModel''', ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
315
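# The _LazyModule plumbing above defers heavy imports until a symbol is first touched.
# A toy version of the same idea using module-level __getattr__ (PEP 562). This is a
# sketch of the concept, not the transformers implementation, and it is only
# meaningful when the file lives inside a package and is imported as a module:
import importlib

# Map public names to the submodules that define them (illustrative entry).
_LAZY_ATTRS = {
    "InstructBlipProcessor": ".processing_instructblip",
}


def __getattr__(name: str):
    """Import the owning submodule only when `name` is first accessed."""
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")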
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the message decoded with every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
68
0
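# The decrypt loop above undoes a Caesar shift by trying all 26 keys. For reference,
# a sketch of the forward shift it inverts:
import string


def encrypt(message: str, key: int) -> str:
    """Shift uppercase letters forward by `key`, leaving other symbols alone."""
    out = []
    for symbol in message.upper():
        if symbol in string.ascii_uppercase:
            num = (string.ascii_uppercase.find(symbol) + key) % 26
            out.append(string.ascii_uppercase[num])
        else:
            out.append(symbol)
    return "".join(out)


assert encrypt("Hello, World!", 3) == "KHOOR, ZRUOG!"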
"""simple docstring""" from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
57
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""") @require_sentencepiece @require_tokenizers class a__ ( snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = SpeechTaTokenizer __lowerCamelCase = False __lowerCamelCase = True def UpperCamelCase ( self ) -> Any: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ = SpeechTaTokenizer(lowercase ) A__ = AddedToken("<mask>" , lstrip=lowercase , rstrip=lowercase ) A__ = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase ( self , lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = "this is a test" A__ = "this is a test" return input_text, output_text def UpperCamelCase ( self , lowercase , lowercase=False , lowercase=20 , lowercase=5 ) -> Optional[Any]: '''simple docstring''' A__ , A__ = self.get_input_output_texts(lowercase ) A__ = tokenizer.encode(lowercase , add_special_tokens=lowercase ) A__ = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase ) return text, ids def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = "<pad>" A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-4] , "œ" ) self.assertEqual(vocab_keys[-2] , "<mask>" ) self.assertEqual(vocab_keys[-1] , "<ctc_blank>" ) self.assertEqual(len(lowercase ) , 81 ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ = self.get_tokenizers(do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) A__ = ["aaaaa bbbbbb", "cccccccccdddddddd"] A__ = tokenizer.add_tokens(lowercase ) A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , len(lowercase ) ) self.assertEqual(lowercase , all_size + len(lowercase ) ) A__ = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=lowercase ) self.assertGreaterEqual(len(lowercase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) A__ = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} A__ = tokenizer.add_special_tokens(lowercase ) A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) self.assertEqual(lowercase , lowercase ) 
self.assertEqual(lowercase , len(lowercase ) ) self.assertEqual(lowercase , all_size_a + len(lowercase ) ) A__ = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=lowercase ) self.assertGreaterEqual(len(lowercase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' pass def UpperCamelCase ( self ) -> Any: '''simple docstring''' pass def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = self.get_tokenizer() A__ = tokenizer.tokenize("This is a test" ) # fmt: off self.assertListEqual(lowercase , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) A__ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( lowercase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) A__ = tokenizer.convert_tokens_to_ids(lowercase ) # fmt: off self.assertListEqual(lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on A__ = tokenizer.convert_ids_to_tokens(lowercase ) self.assertListEqual( lowercase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) @slow def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = [ "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides " "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural " "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained " "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.", "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox jumps over the lazy dog.", ] # fmt: off A__ = { "input_ids": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=lowercase , )
68
0
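# The tokenizer test above checks tokenize -> ids -> tokens round trips against a
# SentencePiece fixture. The same assertion pattern on a self-contained toy, with a
# hypothetical character-level class standing in for the real tokenizer:
class ToyCharTokenizer:
    """Character-level stand-in for the SentencePiece fixture used in the test."""

    def __init__(self, corpus: str) -> None:
        self.vocab = {ch: i for i, ch in enumerate(sorted(set(corpus)))}
        self.inv_vocab = {i: ch for ch, i in self.vocab.items()}

    def tokenize(self, text: str) -> list[str]:
        return list(text)

    def convert_tokens_to_ids(self, tokens: list[str]) -> list[int]:
        return [self.vocab[t] for t in tokens]

    def convert_ids_to_tokens(self, ids: list[int]) -> list[str]:
        return [self.inv_vocab[i] for i in ids]


tok = ToyCharTokenizer("this is a test")
tokens = tok.tokenize("this is a test")
ids = tok.convert_tokens_to_ids(tokens)
assert tok.convert_ids_to_tokens(ids) == tokens  # lossless round trip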
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. lowercase_ = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class a_ ( unittest.TestCase ): '''simple docstring''' UpperCamelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCamelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: UpperCamelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: UpperCamelCase = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def snake_case_( self ) -> Tuple: _SCREAMING_SNAKE_CASE = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" ) _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" , top_k=2 ) self.assertEqual( nested_simplify(A ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] ) _SCREAMING_SNAKE_CASE = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 ) self.assertEqual( nested_simplify(A ) , [ [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], ] , ) _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" , top_k=1 ) self.assertEqual(nested_simplify(A ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) # Legacy behavior _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" , return_all_scores=A ) self.assertEqual(nested_simplify(A ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" , return_all_scores=A ) self.assertEqual( nested_simplify(A ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] ) _SCREAMING_SNAKE_CASE = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=A ) self.assertEqual( nested_simplify(A ) , [ [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], ] , ) _SCREAMING_SNAKE_CASE = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=A ) self.assertEqual( nested_simplify(A ) , [ {"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_0""", """score""": 0.504}, ] , ) @require_torch def snake_case_( self ) -> Tuple: import torch _SCREAMING_SNAKE_CASE = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , ) _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) @require_tf def snake_case_( self ) -> str: 
_SCREAMING_SNAKE_CASE = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" ) _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) @slow @require_torch def snake_case_( self ) -> str: _SCREAMING_SNAKE_CASE = pipeline("""text-classification""" ) _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) _SCREAMING_SNAKE_CASE = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) _SCREAMING_SNAKE_CASE = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """POSITIVE""", """score""": 0.988}] ) @slow @require_tf def snake_case_( self ) -> List[str]: _SCREAMING_SNAKE_CASE = pipeline("""text-classification""" , framework="""tf""" ) _SCREAMING_SNAKE_CASE = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) _SCREAMING_SNAKE_CASE = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) _SCREAMING_SNAKE_CASE = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(A ) , [{"""label""": """POSITIVE""", """score""": 0.988}] ) def snake_case_( self , A , A , A ) -> List[str]: _SCREAMING_SNAKE_CASE = TextClassificationPipeline(model=A , tokenizer=A ) return text_classifier, ["HuggingFace is in", "This is another test"] def snake_case_( self , A , A ) -> List[str]: _SCREAMING_SNAKE_CASE = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 _SCREAMING_SNAKE_CASE = """HuggingFace is in""" _SCREAMING_SNAKE_CASE = text_classifier(A ) self.assertEqual(nested_simplify(A ) , [{"""label""": ANY(A ), """score""": ANY(A )}] ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) _SCREAMING_SNAKE_CASE = ["""HuggingFace is in """, """Paris is in France"""] _SCREAMING_SNAKE_CASE = text_classifier(A ) self.assertEqual( nested_simplify(A ) , [{"""label""": ANY(A ), """score""": ANY(A )}, {"""label""": ANY(A ), """score""": ANY(A )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format _SCREAMING_SNAKE_CASE = text_classifier(A , top_k=A ) _SCREAMING_SNAKE_CASE = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(A ) , [[{"""label""": ANY(A ), """score""": ANY(A )}] * N, [{"""label""": ANY(A ), """score""": ANY(A )}] * N] , ) _SCREAMING_SNAKE_CASE = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""} _SCREAMING_SNAKE_CASE = text_classifier(A ) self.assertEqual( nested_simplify(A ) , {"""label""": ANY(A ), """score""": ANY(A )} , ) self.assertTrue(outputs["""label"""] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. 
_SCREAMING_SNAKE_CASE = [["""HuggingFace is in """, """Paris is in France"""]] with self.assertRaises(A ): text_classifier(A ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility _SCREAMING_SNAKE_CASE = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] ) self.assertEqual( nested_simplify(A ) , [{"""label""": ANY(A ), """score""": ANY(A )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
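# --- Editor's illustrative usage sketch (not part of the record above): the tests
# exercise `top_k` and the legacy `return_all_scores` paths of the
# text-classification pipeline; a minimal stand-alone run with the same tiny
# checkpoint (assumes it can be downloaded):
from transformers import pipeline

classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
print(classifier("This is great !"))           # best label only
print(classifier("This is great !", top_k=2))  # all labels, best first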
58
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: int ) -> List[str]: '''simple docstring''' A__ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] A__ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } A__ = F'{src_lang}-{tgt_lang}' A__ = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n' os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) A__ = os.path.join(SCREAMING_SNAKE_CASE_ , "README.md" ) print(F'Generating {path}' ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) # make sure we are under the root of the project lowerCAmelCase__ = Path(__file__).resolve().parent.parent.parent lowerCAmelCase__ = repo_dir / """model_cards""" for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = model_name.split("""-""") lowerCAmelCase__ = model_cards_dir / """facebook""" / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
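# --- Editor's illustrative sketch (not part of the record above): the generated
# card documents FSMT inference; a minimal runnable version for one pair, mirroring
# the card's own snippet (assumes the checkpoint can be downloaded):
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-ru-en"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input_ids = tokenizer.encode("Машинное обучение - это здорово, не так ли?", return_tensors="pt")
decoded = tokenizer.decode(model.generate(input_ids)[0], skip_special_tokens=True)
print(decoded)  # "Machine learning is great, isn't it?"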
68
0
'''simple docstring''' from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __lowerCAmelCase = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... 
).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n' def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=8 ): _snake_case = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 _snake_case = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class _lowerCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Optional[int]: super().__init__() self.register_modules( unet=UpperCAmelCase , scheduler=UpperCAmelCase , movq=UpperCAmelCase , ) _snake_case = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: if latents is None: _snake_case = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase , dtype=UpperCAmelCase ) else: if latents.shape != shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) _snake_case = latents.to(UpperCAmelCase ) _snake_case = latents * scheduler.init_noise_sigma return latents def lowercase (self , UpperCAmelCase=0 ) -> Union[str, Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _snake_case = torch.device(f"""cuda:{gpu_id}""" ) _snake_case = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCAmelCase , UpperCAmelCase ) def lowercase (self , UpperCAmelCase=0 ) -> Dict: if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) _snake_case = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=UpperCAmelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) _snake_case = None for cpu_offloaded_model in [self.unet, self.movq]: _snake_case, _snake_case = cpu_offload_with_hook(UpperCAmelCase , UpperCAmelCase , prev_module_hook=UpperCAmelCase ) # We'll offload the last model manually. 
_snake_case = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowercase (self ) -> Optional[Any]: if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(UpperCAmelCase , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(UpperCAmelCase ) def __call__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 512 , UpperCAmelCase = 512 , UpperCAmelCase = 100 , UpperCAmelCase = 4.0 , UpperCAmelCase = 1 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , ) -> int: _snake_case = self._execution_device _snake_case = guidance_scale > 1.0 if isinstance(UpperCAmelCase , UpperCAmelCase ): _snake_case = torch.cat(UpperCAmelCase , dim=0 ) if isinstance(UpperCAmelCase , UpperCAmelCase ): _snake_case = torch.cat(UpperCAmelCase , dim=0 ) if isinstance(UpperCAmelCase , UpperCAmelCase ): _snake_case = torch.cat(UpperCAmelCase , dim=0 ) _snake_case = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: _snake_case = image_embeds.repeat_interleave(UpperCAmelCase , dim=0 ) _snake_case = negative_image_embeds.repeat_interleave(UpperCAmelCase , dim=0 ) _snake_case = hint.repeat_interleave(UpperCAmelCase , dim=0 ) _snake_case = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase ) _snake_case = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase ) self.scheduler.set_timesteps(UpperCAmelCase , device=UpperCAmelCase ) _snake_case = self.scheduler.timesteps _snake_case = self.movq.config.latent_channels _snake_case, _snake_case = downscale_height_and_width(UpperCAmelCase , UpperCAmelCase , self.movq_scale_factor ) # create initial latent _snake_case = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(UpperCAmelCase ) ): # expand the latents if we are doing classifier free guidance _snake_case = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _snake_case = {"""image_embeds""": image_embeds, """hint""": hint} _snake_case = self.unet( sample=UpperCAmelCase , timestep=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , added_cond_kwargs=UpperCAmelCase , return_dict=UpperCAmelCase , )[0] if do_classifier_free_guidance: _snake_case, _snake_case = noise_pred.split(latents.shape[1] , dim=1 ) _snake_case, _snake_case = noise_pred.chunk(2 ) _snake_case, _snake_case = variance_pred.chunk(2 ) _snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) _snake_case = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): _snake_case, _snake_case = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 _snake_case = self.scheduler.step( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase , )[0] # post-processing _snake_case = self.movq.decode(UpperCAmelCase , force_not_quantize=UpperCAmelCase )["""sample"""] if output_type not 
in ["pt", "np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: _snake_case = image * 0.5 + 0.5 _snake_case = image.clamp(0 , 1 ) _snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": _snake_case = self.numpy_to_pil(UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase )
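# --- Editor's illustrative sketch (not part of the record above): the module-level
# helper in this pipeline rounds the requested height/width up to the movq latent
# grid before latents are created; a stand-alone copy of that logic:
def downscale_then_align(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor

assert downscale_then_align(768, 768) == (96, 96)  # multiples of 64 map exactly
assert downscale_then_align(100, 100) == (16, 16)  # non-multiples round up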
341
from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy lowerCAmelCase__ = logging.get_logger(__name__) class a__ ( snake_case ): """simple docstring""" def __init__( self , lowercase , lowercase , lowercase , **lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = feature_size A__ = sampling_rate A__ = padding_value A__ = kwargs.pop("padding_side" , "right" ) A__ = kwargs.pop("return_attention_mask" , lowercase ) super().__init__(**lowercase ) def UpperCamelCase ( self , lowercase , lowercase = True , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , ) -> BatchFeature: '''simple docstring''' if isinstance(lowercase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): A__ = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F' to this method that includes {self.model_input_names[0]}, but you provided' F' {list(processed_features.keys() )}' ) A__ = processed_features[self.model_input_names[0]] A__ = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowercase ) == 0: if return_attention_mask: A__ = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch A__ = required_input[0] if isinstance(lowercase , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. A__ = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowercase ): A__ = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowercase ): A__ = "tf" elif is_torch_tensor(lowercase ): A__ = "pt" elif isinstance(lowercase , (int, float, list, tuple, np.ndarray) ): A__ = "np" else: raise ValueError( F'type of {first_element} unknown: {type(lowercase )}. ' "Should be one of a python, numpy, pytorch or tensorflow object." ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): A__ = to_numpy(lowercase ) else: A__ = [to_numpy(lowercase ) for v in value] # Convert padding_strategy in PaddingStrategy A__ = self._get_padding_strategies(padding=lowercase , max_length=lowercase ) A__ = processed_features[self.model_input_names[0]] A__ = len(lowercase ) if not all(len(lowercase ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." 
) A__ = [] for i in range(lowercase ): A__ = {k: v[i] for k, v in processed_features.items()} # truncation A__ = self._truncate( lowercase , max_length=lowercase , pad_to_multiple_of=lowercase , truncation=lowercase , ) truncated_inputs.append(lowercase ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length A__ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) A__ = PaddingStrategy.MAX_LENGTH A__ = {} for i in range(lowercase ): # padding A__ = self._pad( truncated_inputs[i] , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , ) for key, value in outputs.items(): if key not in batch_outputs: A__ = [] if value.dtype is np.dtype(np.floataa ): A__ = value.astype(np.floataa ) batch_outputs[key].append(lowercase ) return BatchFeature(lowercase , tensor_type=lowercase ) def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = PaddingStrategy.DO_NOT_PAD , lowercase = None , lowercase = None , ) -> dict: '''simple docstring''' A__ = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: A__ = len(lowercase ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of A__ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase ) < max_length if return_attention_mask and "attention_mask" not in processed_features: A__ = np.ones(len(lowercase ) , dtype=np.intaa ) if needs_to_be_padded: A__ = max_length - len(lowercase ) if self.padding_side == "right": if return_attention_mask: A__ = np.pad( processed_features["attention_mask"] , (0, difference) ) A__ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) A__ = np.pad( lowercase , lowercase , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: A__ = np.pad( processed_features["attention_mask"] , (difference, 0) ) A__ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) A__ = np.pad( lowercase , lowercase , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , ) -> Union[str, Any]: '''simple docstring''' if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." 
) A__ = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of A__ = len(lowercase ) > max_length if needs_to_be_truncated: A__ = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: A__ = processed_features["attention_mask"][:max_length] return processed_features def UpperCamelCase ( self , lowercase=False , lowercase=None ) -> Any: '''simple docstring''' if padding is not False: if padding is True: A__ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowercase , lowercase ): A__ = PaddingStrategy(lowercase ) elif isinstance(lowercase , lowercase ): A__ = padding else: A__ = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
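# --- Editor's illustrative sketch (not part of the record above): the _pad path
# above right-pads 1-D feature arrays with `padding_value` and extends the
# attention mask with zeros; a minimal stand-alone equivalent:
import numpy as np

def pad_right(values, max_length, padding_value=0.0):
    difference = max_length - len(values)
    attention_mask = np.pad(np.ones(len(values), dtype=np.int32), (0, difference))
    padded = np.pad(values, (0, difference), "constant", constant_values=padding_value)
    return padded, attention_mask

padded, mask = pad_right(np.array([0.1, 0.2, 0.3]), max_length=5)
print(padded)  # [0.1 0.2 0.3 0.  0. ]
print(mask)    # [1 1 1 0 0]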
68
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowercase_ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ): _a = ["""pixel_values"""] def __init__( self , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = PILImageResampling.BICUBIC , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = 1 / 255 , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = True , **lowerCAmelCase , ) -> None: '''simple docstring''' super().__init__(**lowerCAmelCase ) _lowercase =size if size is not None else {'shortest_edge': 224} _lowercase =get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase ) _lowercase =crop_size if crop_size is not None else {'height': 224, 'width': 224} _lowercase =get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase , param_name='crop_size' ) _lowercase =do_resize _lowercase =size _lowercase =resample _lowercase =do_center_crop _lowercase =crop_size _lowercase =do_rescale _lowercase =rescale_factor _lowercase =do_normalize _lowercase =image_mean if image_mean is not None else OPENAI_CLIP_MEAN _lowercase =image_std if image_std is not None else OPENAI_CLIP_STD _lowercase =do_convert_rgb def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = PILImageResampling.BICUBIC , lowerCAmelCase = None , **lowerCAmelCase , ) -> np.ndarray: '''simple docstring''' _lowercase =get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) _lowercase =get_resize_output_image_size(lowerCAmelCase , size=size['shortest_edge'] , default_to_square=lowerCAmelCase ) return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase ) def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ) -> np.ndarray: '''simple docstring''' _lowercase =get_size_dict(lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(lowerCAmelCase , size=(size['height'], size['width']) , data_format=lowerCAmelCase , **lowerCAmelCase ) def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ) -> int: '''simple docstring''' return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase ) def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ) -> np.ndarray: '''simple docstring''' return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase ) def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ) -> PIL.Image.Image: '''simple docstring''' _lowercase =do_resize if do_resize is not None else self.do_resize _lowercase =size if size is not None else self.size _lowercase =get_size_dict(lowerCAmelCase , param_name='size' , default_to_square=lowerCAmelCase ) _lowercase =resample if resample is not None else self.resample _lowercase =do_center_crop if do_center_crop is not None else self.do_center_crop _lowercase =crop_size if crop_size is not None else self.crop_size _lowercase =get_size_dict(lowerCAmelCase , param_name='crop_size' , default_to_square=lowerCAmelCase ) _lowercase =do_rescale if do_rescale is not None else self.do_rescale _lowercase =rescale_factor if rescale_factor is not None else self.rescale_factor _lowercase =do_normalize if do_normalize is not None else self.do_normalize _lowercase =image_mean if image_mean is not None else self.image_mean _lowercase =image_std if image_std is not None else self.image_std _lowercase =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _lowercase =make_list_of_images(lowerCAmelCase ) if not valid_images(lowerCAmelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: _lowercase =[convert_to_rgb(lowerCAmelCase ) for image in images] # All transformations expect numpy arrays. 
_lowercase =[to_numpy_array(lowerCAmelCase ) for image in images] if do_resize: _lowercase =[self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase ) for image in images] if do_center_crop: _lowercase =[self.center_crop(image=lowerCAmelCase , size=lowerCAmelCase ) for image in images] if do_rescale: _lowercase =[self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase ) for image in images] if do_normalize: _lowercase =[self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase ) for image in images] _lowercase =[to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase ) for image in images] _lowercase ={'pixel_values': images} return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
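# --- Editor's illustrative usage sketch (not part of the record above): the
# preprocess chain above is resize -> center-crop -> rescale -> normalize.
# Assuming the class matches the one shipped as transformers.CLIPImageProcessor:
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()  # defaults: shortest_edge 224, center-crop 224x224
image = Image.new("RGB", (400, 300), color="white")
batch = processor(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)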
205
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
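# --- Editor's illustrative note (not part of the record above): with the
# _LazyModule registration, importing the package stays cheap and the submodule
# import happens on first attribute access. Minimal usage (assumes transformers
# is installed):
from transformers import GroupViTConfig  # resolved lazily on first access

config = GroupViTConfig()
print(config.model_type)  # "groupvit"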
68
0
"""simple docstring""" import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging A: List[str] = logging.get_logger(__name__) A: Tuple = "▁" A: Tuple = { "vocab_file": "vocab.json", "spm_file": "sentencepiece.bpe.model", "tokenizer_config_file": "tokenizer_config.json", } A: Union[str, Any] = { "vocab_file": { "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json", "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json", }, "spm_file": { "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model", "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model", }, "tokenizer_config_file": { "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json", "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json", }, } A: Optional[Any] = { "facebook/m2m100_418M": 1_0_2_4, } # fmt: off A: Dict = { "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"], "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"] } class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): __lowerCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES __lowerCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : List[str] = ['input_ids', 'attention_mask'] __lowerCAmelCase : List[str] = [] __lowerCAmelCase : Any = [] def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="m2m100" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=8 , **_SCREAMING_SNAKE_CASE , ) -> None: '''simple docstring''' UpperCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs UpperCAmelCase : List[Any] = language_codes UpperCAmelCase : int = FAIRSEQ_LANGUAGE_CODES[language_codes] UpperCAmelCase : Optional[Any] = {lang_code: F"__{lang_code}__" for lang_code in fairseq_language_code} UpperCAmelCase : int = kwargs.get("""additional_special_tokens""" , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(_SCREAMING_SNAKE_CASE ) for lang_code in fairseq_language_code if self.get_lang_token(_SCREAMING_SNAKE_CASE ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , 
pad_token=_SCREAMING_SNAKE_CASE , language_codes=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) UpperCAmelCase : List[str] = vocab_file UpperCAmelCase : Dict = load_json(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = {v: k for k, v in self.encoder.items()} UpperCAmelCase : List[Any] = spm_file UpperCAmelCase : Union[str, Any] = load_spm(_SCREAMING_SNAKE_CASE , self.sp_model_kwargs ) UpperCAmelCase : List[Any] = len(self.encoder ) UpperCAmelCase : int = { self.get_lang_token(_SCREAMING_SNAKE_CASE ): self.encoder_size + i for i, lang_code in enumerate(_SCREAMING_SNAKE_CASE ) } UpperCAmelCase : Any = {lang_code: self.encoder_size + i for i, lang_code in enumerate(_SCREAMING_SNAKE_CASE )} UpperCAmelCase : List[str] = {v: k for k, v in self.lang_token_to_id.items()} UpperCAmelCase : Optional[Any] = src_lang if src_lang is not None else """en""" UpperCAmelCase : Optional[int] = tgt_lang UpperCAmelCase : Optional[int] = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) UpperCAmelCase : Dict = num_madeup_words @property def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' return len(self.encoder ) + len(self.lang_token_to_id ) @property def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None: '''simple docstring''' UpperCAmelCase : Any = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: '''simple docstring''' if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder[self.unk_token] ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> str: '''simple docstring''' if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(_SCREAMING_SNAKE_CASE , self.unk_token ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : List[str] = [] UpperCAmelCase : Dict = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token UpperCAmelCase : List[str] = [] else: current_sub_tokens.append(_SCREAMING_SNAKE_CASE ) out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) return out_string.strip() def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = [1] * len(self.prefix_tokens ) UpperCAmelCase : Optional[Any] = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_SCREAMING_SNAKE_CASE )) + suffix_ones return prefix_ones + ([0] * len(_SCREAMING_SNAKE_CASE )) + ([0] * len(_SCREAMING_SNAKE_CASE )) + suffix_ones def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]: '''simple 
docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : Dict = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Dict: '''simple docstring''' UpperCAmelCase : str = self.__dict__.copy() UpperCAmelCase : Optional[Any] = None return state def __setstate__( self , _SCREAMING_SNAKE_CASE ) -> None: '''simple docstring''' UpperCAmelCase : Any = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): UpperCAmelCase : str = {} UpperCAmelCase : Tuple = load_spm(self.spm_file , self.sp_model_kwargs ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: '''simple docstring''' UpperCAmelCase : List[str] = Path(_SCREAMING_SNAKE_CASE ) if not save_dir.is_dir(): raise OSError(F"{save_directory} should be a directory" ) UpperCAmelCase : Union[str, Any] = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) UpperCAmelCase : Optional[Any] = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder , _SCREAMING_SNAKE_CASE ) if os.path.abspath(self.spm_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , _SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.spm_file ): with open(_SCREAMING_SNAKE_CASE , """wb""" ) as fi: UpperCAmelCase : Tuple = self.sp_model.serialized_model_proto() fi.write(_SCREAMING_SNAKE_CASE ) return (str(_SCREAMING_SNAKE_CASE ), str(_SCREAMING_SNAKE_CASE )) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "en" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "ro" , **_SCREAMING_SNAKE_CASE , ) -> BatchEncoding: '''simple docstring''' UpperCAmelCase : Optional[Any] = src_lang UpperCAmelCase : str = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) UpperCAmelCase : List[str] = src_lang UpperCAmelCase : List[Any] = self(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = self.get_lang_id(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None: '''simple docstring''' UpperCAmelCase : Dict = self.get_lang_token(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = self.lang_token_to_id[lang_token] UpperCAmelCase : Optional[Any] = [self.cur_lang_id] UpperCAmelCase : Union[str, Any] = 
[self.eos_token_id] def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None: '''simple docstring''' UpperCAmelCase : List[str] = self.get_lang_token(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = self.lang_token_to_id[lang_token] UpperCAmelCase : List[Any] = [self.cur_lang_id] UpperCAmelCase : Dict = [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> str: '''simple docstring''' return self.lang_code_to_token[lang] def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' UpperCAmelCase : Optional[int] = self.get_lang_token(_SCREAMING_SNAKE_CASE ) return self.lang_token_to_id[lang_token] def _snake_case ( UpperCamelCase : str , UpperCamelCase : Dict[str, Any] ): UpperCAmelCase : Dict = sentencepiece.SentencePieceProcessor(**SCREAMING_SNAKE_CASE_ ) spm.Load(str(SCREAMING_SNAKE_CASE_ ) ) return spm def _snake_case ( UpperCamelCase : str ): with open(SCREAMING_SNAKE_CASE_ , """r""" ) as f: return json.load(SCREAMING_SNAKE_CASE_ ) def _snake_case ( UpperCamelCase : str , UpperCamelCase : str ): with open(SCREAMING_SNAKE_CASE_ , """w""" ) as f: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , indent=2 )
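# --- Editor's illustrative usage sketch (not part of the record above): the
# tokenizer above swaps language prefix tokens per direction; the documented
# M2M100 translation round-trip (assumes the 418M checkpoint can be downloaded):
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

encoded = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))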
109
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    # Parameter names reconstructed from the attribute assignments in the body;
    # the obfuscated source gave every parameter the same (invalid) name.
    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
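# --- Editor's illustrative usage sketch (not part of the record above), assuming
# this is the config class shipped as transformers.GPTNeoXJapaneseConfig:
from transformers import GPTNeoXJapaneseConfig

config = GPTNeoXJapaneseConfig(hidden_size=1024)  # override one default
print(config.hidden_size, config.num_hidden_layers)  # 1024 32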
68
0
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
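# --- Editor's illustrative sketch (not part of the record above): calculate_bleu
# in the examples utilities is assumed to wrap sacrebleu's corpus_bleu; a minimal
# equivalent under that assumption (assumes sacrebleu is installed):
import sacrebleu

def bleu(output_lines, reference_lines):
    return {"bleu": round(sacrebleu.corpus_bleu(output_lines, [reference_lines]).score, 4)}

print(bleu(["the cat sat on the mat"], ["the cat sat on the mat"]))  # {'bleu': 100.0}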
225
import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable) -> Callable:
    """Decorator that warns on every call that the wrapped function is experimental."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        # UserWarning (the warnings module default) replaces the obfuscated,
        # non-functional category argument in the source.
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
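# --- Editor's illustrative usage sketch (not part of the record above): applying
# the decorator warns on call and then delegates to the wrapped function:
@experimental
def answer(x: int) -> int:
    return x * 2

print(answer(21))  # emits "'answer' is experimental ..." via warnings, prints 42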
68
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _a ( unittest.TestCase): """simple docstring""" def __init__( self : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str]=7 , __UpperCamelCase : Union[str, Any]=3 , __UpperCamelCase : List[str]=1_8 , __UpperCamelCase : Union[str, Any]=3_0 , __UpperCamelCase : int=4_0_0 , __UpperCamelCase : str=True , __UpperCamelCase : Dict=None , __UpperCamelCase : int=True , )->List[str]: _UpperCAmelCase = size if size is not None else {'''height''': 1_8, '''width''': 1_8} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = apply_ocr def lowercase__ ( self : Optional[Any] )->Union[str, Any]: return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _a ( lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = LayoutLMvaImageProcessor if is_pytesseract_available() else None def lowercase__ ( self : Tuple )->str: _UpperCAmelCase = LayoutLMvaImageProcessingTester(self ) @property def lowercase__ ( self : Any )->List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def lowercase__ ( self : int )->str: _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCamelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__UpperCamelCase , '''size''' ) ) self.assertTrue(hasattr(__UpperCamelCase , '''apply_ocr''' ) ) def lowercase__ ( self : Optional[Any] )->str: _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} ) def lowercase__ ( self : Optional[Any] )->Optional[Any]: pass def lowercase__ ( self : Union[str, Any] )->Optional[Any]: _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , __UpperCamelCase ) self.assertIsInstance(encoding.boxes , __UpperCamelCase ) # Test batched _UpperCAmelCase = image_processing(__UpperCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], 
self.image_processor_tester.size['''width'''], ) , ) def lowercase__ ( self : Any )->int: _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched _UpperCAmelCase = image_processing(__UpperCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def lowercase__ ( self : Dict )->Union[str, Any]: _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched _UpperCAmelCase = image_processing(__UpperCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def lowercase__ ( self : Tuple )->Tuple: _UpperCAmelCase = LayoutLMvaImageProcessor() from datasets import load_dataset _UpperCAmelCase = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) _UpperCAmelCase = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) _UpperCAmelCase = image_processing(__UpperCamelCase , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 _UpperCAmelCase = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', 
'''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 _UpperCAmelCase = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 
8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __UpperCamelCase ) self.assertListEqual(encoding.boxes , __UpperCamelCase ) # with apply_OCR = False _UpperCAmelCase = LayoutLMvaImageProcessor(apply_ocr=__UpperCamelCase ) _UpperCAmelCase = image_processing(__UpperCamelCase , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
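# A hedged usage sketch for the image processor exercised above. It assumes the
# real (non-obfuscated) transformers class name `LayoutLMv3ImageProcessor` and a
# Pillow image; apply_ocr=True would additionally require pytesseract and the
# Tesseract binary, and "document.png" is a placeholder file name.
from PIL import Image

from transformers import LayoutLMv3ImageProcessor

image = Image.open("document.png").convert("RGB")
processor = LayoutLMv3ImageProcessor(apply_ocr=False)  # skip OCR: only pixel values are returned
encoding = processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224]), matching the assertion above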
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) lowerCAmelCase__ = """\ Text data. Second line of data.""" lowerCAmelCase__ = """file""" @pytest.fixture(scope="session" ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> Optional[int]: '''simple docstring''' A__ = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd") A__ = bytes(SCREAMING_SNAKE_CASE_ , "utf-8" ) with zstd.open(SCREAMING_SNAKE_CASE_ , "wb" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any ) -> List[str]: '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , SCREAMING_SNAKE_CASE_ ) , "w" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return FILE_PATH @pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: int ) -> Any: '''simple docstring''' A__ = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path} A__ = input_paths[compression_format] A__ = tmp_path / "cache" A__ = DownloadConfig(cache_dir=SCREAMING_SNAKE_CASE_ , extract_compressed_file=SCREAMING_SNAKE_CASE_ ) A__ = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ ) as f: A__ = f.read() with open(SCREAMING_SNAKE_CASE_ ) as f: A__ = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("default_extracted" , [True, False] ) @pytest.mark.parametrize("default_cache_dir" , [True, False] ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: str ) -> Dict: '''simple docstring''' A__ = "custom_cache" A__ = "custom_extracted_dir" A__ = tmp_path / "custom_extracted_path" if default_extracted: A__ = ("downloads" if default_cache_dir else custom_cache_dir, "extracted") else: monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , SCREAMING_SNAKE_CASE_ ) monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(SCREAMING_SNAKE_CASE_ ) ) A__ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) A__ = xz_file A__ = ( DownloadConfig(extract_compressed_file=SCREAMING_SNAKE_CASE_ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE_ ) ) A__ = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ ) assert Path(SCREAMING_SNAKE_CASE_ ).parent.parts[-2:] == expected def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> Optional[int]: '''simple docstring''' A__ = str(Path(SCREAMING_SNAKE_CASE_ ).resolve() ) assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file # relative path A__ = str(Path(SCREAMING_SNAKE_CASE_ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[str]: '''simple docstring''' A__ = str(tmp_path.resolve() / "__missing_file__.txt" ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): 
cached_path(SCREAMING_SNAKE_CASE_ ) # relative path A__ = "./__missing_file__.txt" with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> Union[str, Any]: '''simple docstring''' A__ = get_from_cache(F'tmp://{tmpfs_file}' ) with open(SCREAMING_SNAKE_CASE_ ) as f: A__ = f.read() assert output_file_content == FILE_CONTENT @patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( ) -> List[Any]: '''simple docstring''' with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> int: '''simple docstring''' A__ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(SCREAMING_SNAKE_CASE_ ): http_get("https://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): http_head("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[Any]: '''simple docstring''' A__ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(SCREAMING_SNAKE_CASE_ ): ftp_get("ftp://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): ftp_head("ftp://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> str: '''simple docstring''' A__ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(SCREAMING_SNAKE_CASE_ ): fsspec_get("s3://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): fsspec_head("s3://huggingface.co" )
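# A hedged sketch of the caching behavior the tests above exercise: `cached_path`
# copies or downloads a file into the cache, and with extract_compressed_file=True
# it returns the path of the extracted text instead. "./data.txt.gz" is a
# placeholder archive assumed to exist on disk.
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import cached_path

download_config = DownloadConfig(cache_dir="./hf_cache", extract_compressed_file=True)
extracted_path = cached_path("./data.txt.gz", download_config=download_config)
with open(extracted_path) as f:
    print(f.read())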
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCAmelCase =logging.get_logger(__name__) __UpperCAmelCase ={ "microsoft/swin-tiny-patch4-window7-224": ( "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json" ), # See all Swin models at https://huggingface.co/models?filter=swin } class a__ ( UpperCAmelCase__ , UpperCAmelCase__ ): lowerCamelCase : List[str] ="swin" lowerCamelCase : Optional[Any] ={ "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : str , a : Union[str, Any]=2_24 , a : List[str]=4 , a : Optional[Any]=3 , a : Tuple=96 , a : Union[str, Any]=[2, 2, 6, 2] , a : Dict=[3, 6, 12, 24] , a : List[str]=7 , a : str=4.0 , a : Optional[Any]=True , a : Tuple=0.0 , a : List[str]=0.0 , a : Optional[Any]=0.1 , a : Optional[Any]="gelu" , a : List[str]=False , a : Dict=0.02 , a : Any=1e-5 , a : Optional[Any]=32 , a : int=None , a : Dict=None , **a : Union[str, Any] , ): """simple docstring""" super().__init__(**a ) __lowerCamelCase = image_size __lowerCamelCase = patch_size __lowerCamelCase = num_channels __lowerCamelCase = embed_dim __lowerCamelCase = depths __lowerCamelCase = len(a ) __lowerCamelCase = num_heads __lowerCamelCase = window_size __lowerCamelCase = mlp_ratio __lowerCamelCase = qkv_bias __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = drop_path_rate __lowerCamelCase = hidden_act __lowerCamelCase = use_absolute_embeddings __lowerCamelCase = layer_norm_eps __lowerCamelCase = initializer_range __lowerCamelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __lowerCamelCase = int(embed_dim * 2 ** (len(a ) - 1) ) __lowerCamelCase = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(a ) + 1 )] __lowerCamelCase , __lowerCamelCase = get_aligned_output_features_output_indices( out_features=a , out_indices=a , stage_names=self.stage_names ) class a__ ( UpperCAmelCase__ ): lowerCamelCase : int =version.parse("1.11" ) @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" return 1e-4
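# A short sketch of the derived attributes computed in __init__ above, assuming the
# real transformers class name `SwinConfig` (obfuscated in this file) and its defaults.
from transformers import SwinConfig

config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
print(config.hidden_size)  # int(96 * 2 ** (len(depths) - 1)) == 768
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']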
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class a__ : """simple docstring""" __lowerCamelCase = BlenderbotSmallConfig __lowerCamelCase = {} __lowerCamelCase = 'gelu' def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = eos_token_id A__ = pad_token_id A__ = bos_token_id def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A__ = tf.concat([input_ids, eos_tensor] , axis=1 ) A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A__ = prepare_blenderbot_small_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def UpperCamelCase ( self , lowercase , lowercase ) -> str: '''simple docstring''' A__ = TFBlenderbotSmallModel(config=lowercase ).get_decoder() A__ = inputs_dict["input_ids"] A__ = input_ids[:1, :] A__ = inputs_dict["attention_mask"][:1, :] A__ = inputs_dict["head_mask"] A__ = 1 # first forward pass A__ = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase ) A__ , A__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A__ = tf.concat([input_ids, next_tokens] , axis=-1 ) A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A__ = model(lowercase , attention_mask=lowercase )[0] A__ = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A__ = output_from_no_past[:, -3:, 
random_slice_idx] A__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1e-3 ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Optional[Any]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , SCREAMING_SNAKE_CASE_: Dict=None , SCREAMING_SNAKE_CASE_: List[str]=None , ) -> List[Any]: '''simple docstring''' if attention_mask is None: A__ = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class a__ ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) __lowerCamelCase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __lowerCamelCase = ( { 'conversational': TFBlenderbotSmallForConditionalGeneration, 'feature-extraction': TFBlenderbotSmallModel, 'summarization': TFBlenderbotSmallForConditionalGeneration, 'text2text-generation': TFBlenderbotSmallForConditionalGeneration, 'translation': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = False def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = TFBlenderbotSmallModelTester(self ) A__ = ConfigTester(self , config_class=lowercase ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) @require_tokenizers @require_tf class a__ ( unittest.TestCase ): """simple docstring""" __lowerCamelCase = [ 'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ' ' i\'m going to throw up.\nand why is that?' 
] __lowerCamelCase = 'facebook/blenderbot_small-90M' @cached_property def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) @cached_property def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.tokenizer(self.src_text , return_tensors="tf" ) A__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , ) A__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
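# A hedged sketch of the generation flow the integration test above checks. The real
# class names are assumed to be BlenderbotSmallTokenizer / TFAutoModelForSeq2SeqLM
# (the "Seqa" spelling above is dataset obfuscation); running it needs TensorFlow and
# network access to the two checkpoints named in the test.
from transformers import BlenderbotSmallTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot_small-90M")
inputs = tokenizer(["Social anxiety\nWow, I am never shy. Do you have anxiety?"], return_tensors="tf")
generated = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2)
print(tokenizer.batch_decode(generated.numpy(), skip_special_tokens=True)[0])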
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _A = { """configuration_roc_bert""": ["""ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoCBertConfig"""], """tokenization_roc_bert""": ["""RoCBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ """ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """RoCBertForCausalLM""", """RoCBertForMaskedLM""", """RoCBertForMultipleChoice""", """RoCBertForPreTraining""", """RoCBertForQuestionAnswering""", """RoCBertForSequenceClassification""", """RoCBertForTokenClassification""", """RoCBertLayer""", """RoCBertModel""", """RoCBertPreTrainedModel""", """load_tf_weights_in_roc_bert""", ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys _A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase__ = logging.get_logger(__name__) class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = ['pixel_values'] def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ) -> None: '''simple docstring''' super().__init__(**lowercase ) A__ = size if size is not None else {"height": 384, "width": 384} A__ = get_size_dict(lowercase , default_to_square=lowercase ) A__ = do_resize A__ = size A__ = resample A__ = do_rescale A__ = rescale_factor A__ = do_normalize A__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A__ = image_std if image_std is not None else OPENAI_CLIP_STD A__ = do_convert_rgb def UpperCamelCase ( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ) -> np.ndarray: '''simple docstring''' A__ = get_size_dict(lowercase , default_to_square=lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' ) A__ = (size["height"], size["width"]) return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase ) def UpperCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> Optional[Any]: '''simple docstring''' return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray: '''simple docstring''' return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase ) def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> PIL.Image.Image: '''simple docstring''' A__ = do_resize if do_resize is not None else self.do_resize A__ = resample if resample is not None else self.resample A__ = do_rescale if do_rescale is not None else self.do_rescale A__ = rescale_factor if rescale_factor is not None else self.rescale_factor A__ = do_normalize if do_normalize is not None else self.do_normalize A__ = image_mean if image_mean is not None else self.image_mean A__ = image_std if image_std is not None else self.image_std A__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A__ = size if size is not None else self.size A__ = get_size_dict(lowercase , default_to_square=lowercase ) A__ = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
        )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            A__ = [convert_to_rgb(lowercase ) for image in images]
        # All transformations expect numpy arrays.
        A__ = [to_numpy_array(lowercase ) for image in images]
        if do_resize:
            A__ = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
        if do_rescale:
            A__ = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
        if do_normalize:
            A__ = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
        A__ = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
        A__ = BatchFeature(data={"pixel_values": images} , tensor_type=lowercase )
        return encoded_outputs
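# A hedged usage sketch for the preprocess pipeline defined above. The exported class
# name is obfuscated here; in transformers the matching processor is BlipImageProcessor,
# whose defaults are a 384x384 bicubic resize, 1/255 rescaling, and CLIP mean/std
# normalization, as set in __init__ above.
import numpy as np

from transformers import BlipImageProcessor

processor = BlipImageProcessor()
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # synthetic H x W x C image
batch = processor(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 384, 384)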
from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class A__ : def __init__( self , __magic_name__ , ): lowerCamelCase : Tuple = parent lowerCamelCase : str = 1_3 lowerCamelCase : str = 7 lowerCamelCase : int = True lowerCamelCase : Tuple = True lowerCamelCase : Union[str, Any] = False lowerCamelCase : Tuple = True lowerCamelCase : Dict = 9_9 lowerCamelCase : Optional[int] = 3_2 lowerCamelCase : Dict = 2 lowerCamelCase : Optional[int] = 4 lowerCamelCase : Tuple = 3_7 lowerCamelCase : Tuple = """gelu""" lowerCamelCase : List[Any] = 0.1 lowerCamelCase : Any = 0.1 lowerCamelCase : List[str] = 5_1_2 lowerCamelCase : Optional[int] = 1_6 lowerCamelCase : Optional[int] = 2 lowerCamelCase : Optional[int] = 0.02 lowerCamelCase : Dict = 3 lowerCamelCase : Union[str, Any] = 4 lowerCamelCase : Optional[Any] = None def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : Union[str, Any] = None if self.use_input_mask: lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase : List[str] = None lowerCamelCase : Any = None lowerCamelCase : Dict = None if self.use_labels: lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase : List[str] = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = TFDistilBertModel(config=__magic_name__ ) lowerCamelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} lowerCamelCase : List[str] = model(__magic_name__ ) lowerCamelCase : List[str] = [input_ids, input_mask] lowerCamelCase : Any = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Tuple = TFDistilBertForMaskedLM(config=__magic_name__ ) lowerCamelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask} lowerCamelCase : Any = model(__magic_name__ ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = TFDistilBertForQuestionAnswering(config=__magic_name__ ) lowerCamelCase : Optional[int] = { """input_ids""": input_ids, """attention_mask""": input_mask, } lowerCamelCase : int = model(__magic_name__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Optional[Any] = self.num_labels lowerCamelCase : Any = TFDistilBertForSequenceClassification(__magic_name__ ) lowerCamelCase : int = {"""input_ids""": input_ids, """attention_mask""": input_mask} lowerCamelCase : List[str] = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Tuple = self.num_choices lowerCamelCase : str = TFDistilBertForMultipleChoice(__magic_name__ ) lowerCamelCase : List[Any] = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase : int = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase : List[str] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, } lowerCamelCase : Any = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Optional[int] = self.num_labels lowerCamelCase : str = TFDistilBertForTokenClassification(__magic_name__ ) lowerCamelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask} lowerCamelCase : Union[str, Any] = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self ): lowerCamelCase : str = self.prepare_config_and_inputs() ((lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase)) : Tuple = config_and_inputs lowerCamelCase : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Dict = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) _UpperCAmelCase : int = ( { """feature-extraction""": TFDistilBertModel, """fill-mask""": TFDistilBertForMaskedLM, """question-answering""": TFDistilBertForQuestionAnswering, """text-classification""": TFDistilBertForSequenceClassification, """token-classification""": TFDistilBertForTokenClassification, """zero-shot""": TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) _UpperCAmelCase : Dict = False _UpperCAmelCase : Optional[Any] = False def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = 
TFDistilBertModelTester(self ) lowerCamelCase : List[str] = ConfigTester(self , config_class=__magic_name__ , dim=3_7 ) def UpperCamelCase__ ( self ): self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*__magic_name__ ) @slow def UpperCamelCase__ ( self ): for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): lowerCamelCase : int = TFDistilBertModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) @require_tf class A__ ( unittest.TestCase): @slow def UpperCamelCase__ ( self ): lowerCamelCase : int = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" ) lowerCamelCase : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase : Dict = model(__magic_name__ )[0] lowerCamelCase : Optional[Any] = [1, 6, 7_6_8] self.assertEqual(output.shape , __magic_name__ ) lowerCamelCase : Tuple = tf.constant( [ [ [0.19_261_885, -0.13_732_955, 0.4_119_799], [0.22_150_156, -0.07_422_661, 0.39_037_204], [0.22_756_018, -0.0_896_414, 0.3_701_467], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __magic_name__ , atol=1e-4 )
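# A hedged fill-mask sketch for the masked-LM head tested above, assuming TensorFlow
# and network access to the "distilbert-base-uncased" checkpoint.
import tensorflow as tf

from transformers import AutoTokenizer, TFDistilBertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = TFDistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")
inputs = tokenizer("Paris is the [MASK] of France.", return_tensors="tf")
logits = model(**inputs).logits
mask_index = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0])
print(tokenizer.decode([int(tf.argmax(logits[0, mask_index]))]))  # expected: "capital"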
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowerCAmelCase__ = """hf-internal-testing/tiny-random-bert""" lowerCAmelCase__ = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""") lowerCAmelCase__ = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6""" class a__ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = cached_file(lowercase , lowercase ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(lowercase ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(lowercase , lowercase ) ) ) with open(os.path.join(lowercase , "refs" , "main" ) ) as f: A__ = f.read() self.assertEqual(lowercase , os.path.join(lowercase , "snapshots" , lowercase , lowercase ) ) self.assertTrue(os.path.isfile(lowercase ) ) # File is cached at the same place the second time. A__ = cached_file(lowercase , lowercase ) self.assertEqual(lowercase , lowercase ) # Using a specific revision to test the full commit hash. A__ = cached_file(lowercase , lowercase , revision="9b8c223" ) self.assertEqual(lowercase , os.path.join(lowercase , "snapshots" , lowercase , lowercase ) ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' with self.assertRaisesRegex(lowercase , "is not a valid model identifier" ): A__ = cached_file("tiny-random-bert" , lowercase ) with self.assertRaisesRegex(lowercase , "is not a valid git identifier" ): A__ = cached_file(lowercase , lowercase , revision="aaaa" ) with self.assertRaisesRegex(lowercase , "does not appear to have a file named" ): A__ = cached_file(lowercase , "conf" ) def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' with self.assertRaisesRegex(lowercase , "does not appear to have a file named" ): A__ = cached_file(lowercase , "conf" ) with open(os.path.join(lowercase , "refs" , "main" ) ) as f: A__ = f.read() self.assertTrue(os.path.isfile(os.path.join(lowercase , ".no_exist" , lowercase , "conf" ) ) ) A__ = cached_file(lowercase , "conf" , _raise_exceptions_for_missing_entries=lowercase ) self.assertIsNone(lowercase ) A__ = cached_file(lowercase , "conf" , local_files_only=lowercase , _raise_exceptions_for_missing_entries=lowercase ) self.assertIsNone(lowercase ) A__ = mock.Mock() A__ = 500 A__ = {} A__ = HTTPError A__ = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" , return_value=lowercase ) as mock_head: A__ = cached_file(lowercase , "conf" , _raise_exceptions_for_connection_errors=lowercase ) self.assertIsNone(lowercase ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , lowercase ) ) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowercase ) ) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowercase ) ) def UpperCamelCase ( self ) -> str: '''simple docstring''' self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) ) # The function raises if the repository does not exist. 
with self.assertRaisesRegex(lowercase , "is not a valid model identifier" ): get_file_from_repo("bert-base-case" , lowercase ) # The function raises if the revision does not exist. with self.assertRaisesRegex(lowercase , "is not a valid git identifier" ): get_file_from_repo("bert-base-cased" , lowercase , revision="ahaha" ) A__ = get_file_from_repo("bert-base-cased" , lowercase ) # The name is the cached name which is not very easy to test, so instead we load the content. A__ = json.loads(open(lowercase , "r" ).read() ) self.assertEqual(config["hidden_size"] , 768 ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: A__ = Path(lowercase ) / "a.txt" filename.touch() self.assertEqual(get_file_from_repo(lowercase , "a.txt" ) , str(lowercase ) ) self.assertIsNone(get_file_from_repo(lowercase , "b.txt" ) )
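# A hedged sketch contrasting the two hub helpers tested above: `cached_file` raises
# on a missing entry, while `get_file_from_repo` returns None instead. Both need
# network access to the Hugging Face Hub.
from transformers.utils import cached_file, get_file_from_repo

print(cached_file("hf-internal-testing/tiny-random-bert", "config.json"))  # local cache path
print(get_file_from_repo("bert-base-cased", "ahah.txt"))  # None: the file does not exist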
"""simple docstring""" from itertools import count def _snake_case ( _snake_case : int = 50 ) -> int: '''simple docstring''' _A = [1] * min_block_length for n in count(SCREAMING_SNAKE_CASE_ ): fill_count_functions.append(1 ) for block_length in range(SCREAMING_SNAKE_CASE_ , n + 1 ): for block_start in range(n - block_length ): fill_count_functions[n] += fill_count_functions[ n - block_start - block_length - 1 ] fill_count_functions[n] += 1 if fill_count_functions[n] > 1_00_00_00: break return n if __name__ == "__main__": print(F'''{solution() = }''')
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class a__ ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = AutoencoderKL __lowerCamelCase = 'sample' __lowerCamelCase = 1e-2 @property def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = 4 A__ = 3 A__ = (32, 32) A__ = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase ) return {"sample": image} @property def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' return (3, 32, 32) @property def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' return (3, 32, 32) def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } A__ = self.dummy_input return init_dict, inputs_dict def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' pass def UpperCamelCase ( self ) -> Any: '''simple docstring''' pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ , A__ = self.prepare_init_args_and_inputs_for_common() A__ = self.model_class(**lowercase ) model.to(lowercase ) assert not model.is_gradient_checkpointing and model.training A__ = model(**lowercase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() A__ = torch.randn_like(lowercase ) A__ = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing A__ = self.model_class(**lowercase ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(lowercase ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training A__ = model_a(**lowercase ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() A__ = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) A__ = dict(model.named_parameters() ) A__ = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ , A__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(lowercase ) A__ = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) A__ = model.to(lowercase ) model.eval() if torch_device == "mps": A__ = torch.manual_seed(0 ) else: A__ = torch.Generator(device=lowercase ).manual_seed(0 ) A__ = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) A__ = image.to(lowercase ) with torch.no_grad(): A__ = model(lowercase , sample_posterior=lowercase , generator=lowercase ).sample A__ = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": A__ = torch.tensor( [ -4.00_78e-01, -3.83_23e-04, -1.26_81e-01, -1.14_62e-01, 2.00_95e-01, 1.08_93e-01, -8.82_47e-02, -3.03_61e-01, -9.86_44e-03, ] ) elif torch_device == "cpu": A__ = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: A__ = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(lowercase , lowercase , rtol=1e-2 ) ) @slow class a__ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self , lowercase , lowercase ) -> str: '''simple docstring''' return F'gaussian_noise_s={seed}_shape={"_".join([str(lowercase ) for s in shape] )}.npy' def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self , lowercase=0 , lowercase=(4, 3, 512, 512) , lowercase=False ) -> Optional[int]: '''simple docstring''' A__ = torch.floataa if fpaa else torch.floataa A__ = torch.from_numpy(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) ).to(lowercase ).to(lowercase ) return image def UpperCamelCase ( self , lowercase="CompVis/stable-diffusion-v1-4" , lowercase=False ) -> Any: '''simple docstring''' A__ = "fp16" if fpaa else None A__ = torch.floataa if fpaa else torch.floataa A__ = AutoencoderKL.from_pretrained( lowercase , subfolder="vae" , torch_dtype=lowercase , revision=lowercase , ) model.to(lowercase ).eval() return model def UpperCamelCase ( self , lowercase=0 ) -> List[str]: '''simple docstring''' if torch_device == "mps": return torch.manual_seed(lowercase ) return torch.Generator(device=lowercase ).manual_seed(lowercase ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, 
-0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> int: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase ) A__ = self.get_generator(lowercase ) with torch.no_grad(): A__ = model(lowercase , generator=lowercase , sample_posterior=lowercase ).sample assert sample.shape == image.shape A__ = sample[-1, -2:, -2:, :2].flatten().float().cpu() A__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(lowercase , lowercase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase ( self , lowercase , lowercase ) -> List[Any]: '''simple docstring''' A__ = self.get_sd_vae_model(fpaa=lowercase ) A__ = self.get_sd_image(lowercase , fpaa=lowercase ) A__ = self.get_generator(lowercase ) with torch.no_grad(): A__ = model(lowercase , generator=lowercase , sample_posterior=lowercase ).sample assert sample.shape == image.shape A__ = sample[-1, -2:, :2, -2:].flatten().float().cpu() A__ = torch.tensor(lowercase ) assert torch_all_close(lowercase , lowercase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase ) with torch.no_grad(): A__ = model(lowercase ).sample assert sample.shape == image.shape A__ = sample[-1, -2:, -2:, :2].flatten().float().cpu() A__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(lowercase , lowercase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase ( self , lowercase , lowercase ) -> Tuple: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) ) with torch.no_grad(): A__ = model.decode(lowercase ).sample assert list(sample.shape ) == [3, 3, 512, 512] A__ = sample[-1, -2:, :2, -2:].flatten().cpu() A__ = torch.tensor(lowercase ) assert torch_all_close(lowercase , lowercase , atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = self.get_sd_vae_model(fpaa=lowercase ) A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) , fpaa=lowercase ) with torch.no_grad(): A__ = model.decode(lowercase ).sample assert list(sample.shape ) == [3, 3, 512, 512] A__ = sample[-1, -2:, :2, -2:].flatten().float().cpu() A__ = torch.tensor(lowercase ) assert torch_all_close(lowercase , lowercase , atol=5e-3 ) @parameterized.expand([(13,), 
(16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def UpperCamelCase ( self , lowercase ) -> Optional[Any]: '''simple docstring''' A__ = self.get_sd_vae_model(fpaa=lowercase ) A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) , fpaa=lowercase ) with torch.no_grad(): A__ = model.decode(lowercase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): A__ = model.decode(lowercase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowercase , lowercase , atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def UpperCamelCase ( self , lowercase ) -> List[str]: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) ) with torch.no_grad(): A__ = model.decode(lowercase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): A__ = model.decode(lowercase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowercase , lowercase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def UpperCamelCase ( self , lowercase , lowercase ) -> str: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase ) A__ = self.get_generator(lowercase ) with torch.no_grad(): A__ = model.encode(lowercase ).latent_dist A__ = dist.sample(generator=lowercase ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] A__ = sample[0, -1, -3:, -3:].flatten().cpu() A__ = torch.tensor(lowercase ) A__ = 3e-3 if torch_device != "mps" else 1e-2 assert torch_all_close(lowercase , lowercase , atol=lowercase )
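# A hedged encode/decode round-trip sketch using the small dummy checkpoint already
# referenced in the tests above, so it runs on CPU without a GPU.
import torch

from diffusers import AutoencoderKL

model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy").eval()
image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
with torch.no_grad():
    latents = model.encode(image).latent_dist.sample(generator=torch.manual_seed(0))
    reconstruction = model.decode(latents).sample
print(latents.shape, reconstruction.shape)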
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class _UpperCamelCase : '''simple docstring''' __UpperCAmelCase : List[Any] =BlenderbotSmallConfig __UpperCAmelCase : List[str] ={} __UpperCAmelCase : Optional[Any] ="""gelu""" def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ): __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = seq_length __lowerCAmelCase = is_training __lowerCAmelCase = use_labels __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = eos_token_id __lowerCAmelCase = pad_token_id __lowerCAmelCase = bos_token_id def snake_case ( self ): __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __lowerCAmelCase = prepare_blenderbot_small_inputs_dict(__a , __a , __a ) return config, inputs_dict def snake_case ( self , __a , __a ): __lowerCAmelCase = TFBlenderbotSmallModel(config=__a ).get_decoder() __lowerCAmelCase = inputs_dict["input_ids"] __lowerCAmelCase = input_ids[:1, :] __lowerCAmelCase = inputs_dict["attention_mask"][:1, :] __lowerCAmelCase = inputs_dict["head_mask"] __lowerCAmelCase = 1 # first forward pass __lowerCAmelCase = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a ) __lowerCAmelCase , __lowerCAmelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __lowerCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __lowerCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) __lowerCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __lowerCAmelCase = model(__a , attention_mask=__a )[0] 
__lowerCAmelCase = model(__a , attention_mask=__a , past_key_values=__a )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __lowerCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx] __lowerCAmelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__a , __a , rtol=1e-3 ) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , ): '''simple docstring''' if attention_mask is None: __lowerCAmelCase = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __lowerCAmelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __lowerCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Dict =( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) __UpperCAmelCase : str =(TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __UpperCAmelCase : Optional[int] =( { """conversational""": TFBlenderbotSmallForConditionalGeneration, """feature-extraction""": TFBlenderbotSmallModel, """summarization""": TFBlenderbotSmallForConditionalGeneration, """text2text-generation""": TFBlenderbotSmallForConditionalGeneration, """translation""": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) __UpperCAmelCase : Union[str, Any] =True __UpperCAmelCase : str =False __UpperCAmelCase : int =False def snake_case ( self ): __lowerCAmelCase = TFBlenderbotSmallModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=__a ) def snake_case ( self ): self.config_tester.run_common_tests() def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__a ) @require_tokenizers @require_tf class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : int =[ """Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. 
I end up sweating and blushing and feel like """ """ i\'m going to throw up.\nand why is that?""" ] __UpperCAmelCase : List[str] ="""facebook/blenderbot_small-90M""" @cached_property def snake_case ( self ): return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) @cached_property def snake_case ( self ): __lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def snake_case ( self ): __lowerCAmelCase = self.tokenizer(self.src_text , return_tensors="tf" ) __lowerCAmelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__a , ) __lowerCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__a )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask lowerCAmelCase__ = logging.getLogger(__name__) class a__ ( snake_case ): """simple docstring""" def __init__( self , lowercase=-1 ) -> Optional[Any]: '''simple docstring''' A__ = label_idx def UpperCamelCase ( self , lowercase , lowercase ) -> List[InputExample]: '''simple docstring''' if isinstance(lowercase , lowercase ): A__ = mode.value A__ = os.path.join(lowercase , F'{mode}.txt' ) A__ = 1 A__ = [] with open(lowercase , encoding="utf-8" ) as f: A__ = [] A__ = [] for line in f: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) ) guid_index += 1 A__ = [] A__ = [] else: A__ = line.split(" " ) words.append(splits[0] ) if len(lowercase ) > 1: labels.append(splits[self.label_idx].replace("\n" , "" ) ) else: # Examples could have no label for mode = "test" labels.append("O" ) if words: examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) ) return examples def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Optional[Any]: '''simple docstring''' A__ = 0 for line in test_input_reader: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": writer.write(lowercase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: A__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n" writer.write(lowercase ) else: logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] ) def UpperCamelCase ( self , lowercase ) -> List[str]: '''simple docstring''' if path: with open(lowercase , "r" ) as f: A__ = f.read().splitlines() if "O" not in labels: A__ = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class a__ ( snake_case ): """simple docstring""" def __init__( self ) -> Union[str, Any]: '''simple docstring''' super().__init__(label_idx=-2 ) def UpperCamelCase ( self , lowercase ) -> List[str]: '''simple docstring''' if path: with open(lowercase , "r" ) as f: A__ = f.read().splitlines() if "O" not in labels: A__ = ["O"] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class a__ ( snake_case ): """simple docstring""" def UpperCamelCase ( self , lowercase , lowercase ) -> List[InputExample]: '''simple docstring''' if isinstance(lowercase , lowercase ): A__ = mode.value A__ = os.path.join(lowercase , F'{mode}.txt' ) A__ = 1 A__ = [] with open(lowercase , encoding="utf-8" ) as f: for sentence in parse_incr(lowercase ): A__ = [] A__ = [] for token in sentence: words.append(token["form"] ) labels.append(token["upos"] ) assert len(lowercase ) == len(lowercase ) if words: examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) ) guid_index += 1 return examples def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' A__ = 0 for sentence in parse_incr(lowercase ): A__ = preds_list[example_id] A__ = "" for token in sentence: out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) ' out += "\n" writer.write(lowercase ) example_id += 1 def UpperCamelCase ( self , lowercase ) -> 
List[str]: '''simple docstring''' if path: with open(lowercase , "r" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
'''simple docstring''' import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase_ = logging.get_logger(__name__) lowercase_ = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""} lowercase_ = { """vocab_file""": { """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""", }, """emoji_file""": { """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""", }, } lowercase_ = { """abeja/gpt-neox-japanese-2.7b""": 2_048, } def lowerCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) ->Union[str, Any]: with open(SCREAMING_SNAKE_CASE_ , """r""" , encoding="""utf-8""" ) as f: _SCREAMING_SNAKE_CASE = json.loads(f.read() ) _SCREAMING_SNAKE_CASE = collections.OrderedDict() _SCREAMING_SNAKE_CASE = collections.OrderedDict() _SCREAMING_SNAKE_CASE = collections.OrderedDict() with open(SCREAMING_SNAKE_CASE_ , """r""" , encoding="""utf-8""" ) as f: _SCREAMING_SNAKE_CASE = f.readlines() _SCREAMING_SNAKE_CASE = [[t.rstrip("""\n""" )] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token] for idx, b in enumerate(SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = b _SCREAMING_SNAKE_CASE = idx for wd in b: _SCREAMING_SNAKE_CASE = idx return vocab, raw_vocab, ids_to_tokens, emoji class a_ ( snake_case_ ): '''simple docstring''' UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self , A , A , A="<|endoftext|>" , A="<|endoftext|>" , A="<|startoftext|>" , A="<|endoftext|>" , A=False , **A , ) -> str: super().__init__( unk_token=A , pad_token=A , bos_token=A , eos_token=A , do_clean_text=A , **A , ) if not os.path.isfile(A ): raise ValueError( f'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained' """ model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" ) if not os.path.isfile(A ): raise ValueError( f'Can\'t find a emoji file at path \'{emoji_file}\'. 
To load the emoji information from a Google' """ pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" ) _SCREAMING_SNAKE_CASE = do_clean_text _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = load_vocab_and_emoji(A , A ) _SCREAMING_SNAKE_CASE = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def snake_case_( self ) -> int: return len(self.raw_vocab ) def snake_case_( self ) -> Dict: return dict(self.raw_vocab , **self.added_tokens_encoder ) def snake_case_( self , A ) -> Optional[int]: return self.subword_tokenizer.tokenize(A , clean=self.do_clean_text ) def snake_case_( self , A ) -> List[str]: return self.vocab.get(A , self.vocab.get(self.unk_token ) ) def snake_case_( self , A ) -> int: return self.subword_tokenizer.convert_id_to_token(A ) def snake_case_( self , A ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = """""".join(A ).strip() return out_string def snake_case_( self , A ) -> List[int]: _SCREAMING_SNAKE_CASE = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(A , add_special_tokens=A ) + [self.eos_token_id] ) if len(A ) > self.model_max_length: _SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :] return input_ids def snake_case_( self , A , A = None ) -> Tuple[str]: _SCREAMING_SNAKE_CASE = 0 if os.path.isdir(A ): _SCREAMING_SNAKE_CASE = os.path.join( A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _SCREAMING_SNAKE_CASE = os.path.join( A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] ) else: _SCREAMING_SNAKE_CASE = ( (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""] ) _SCREAMING_SNAKE_CASE = ( (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""] ) with open(A , """w""" , encoding="""utf-8""" ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' 
""" Please check that the vocabulary is not corrupted!""" ) _SCREAMING_SNAKE_CASE = token_index writer.write(""",""".join(A ) + """\n""" ) index += 1 with open(A , """w""" , encoding="""utf-8""" ) as writer: json.dump(self.emoji , A ) return vocab_file, emoji_file class a_ ( snake_case_ ): '''simple docstring''' def __init__( self , A , A , A ) -> List[Any]: _SCREAMING_SNAKE_CASE = vocab # same as swe _SCREAMING_SNAKE_CASE = ids_to_tokens # same as bpe _SCREAMING_SNAKE_CASE = emoji _SCREAMING_SNAKE_CASE = np.max([len(A ) for w in self.vocab.keys()] ) _SCREAMING_SNAKE_CASE = re.compile(R"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" ) _SCREAMING_SNAKE_CASE = re.compile(R"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" ) _SCREAMING_SNAKE_CASE = re.compile(R"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" ) _SCREAMING_SNAKE_CASE = re.compile( R"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" ) _SCREAMING_SNAKE_CASE = re.compile( R"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" ) _SCREAMING_SNAKE_CASE = re.compile( R"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" ) _SCREAMING_SNAKE_CASE = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿""" _SCREAMING_SNAKE_CASE = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟""" _SCREAMING_SNAKE_CASE = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} ) def __len__( self ) -> Dict: return len(self.ids_to_tokens ) def snake_case_( self , A ) -> Optional[int]: _SCREAMING_SNAKE_CASE = self.content_repattera.sub("""<URL>""" , A ) _SCREAMING_SNAKE_CASE = self.content_repattera.sub("""<EMAIL>""" , A ) _SCREAMING_SNAKE_CASE = self.content_repattera.sub("""<TEL>""" , A ) _SCREAMING_SNAKE_CASE = self.content_repattera.sub("""<DATE>""" , A ) _SCREAMING_SNAKE_CASE = self.content_repattera.sub("""<DATE>""" , A ) _SCREAMING_SNAKE_CASE = self.content_repattera.sub("""<PRICE>""" , A ) _SCREAMING_SNAKE_CASE = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: _SCREAMING_SNAKE_CASE = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" ) return content def snake_case_( self , A , A=False ) -> List[Any]: _SCREAMING_SNAKE_CASE = text.replace(""" """ , """<SP>""" ) _SCREAMING_SNAKE_CASE = text.replace(""" """ , """<SP>""" ) _SCREAMING_SNAKE_CASE = text.replace("""\r\n""" , """<BR>""" ) _SCREAMING_SNAKE_CASE = text.replace("""\n""" , """<BR>""" ) _SCREAMING_SNAKE_CASE = text.replace("""\r""" , """<BR>""" ) _SCREAMING_SNAKE_CASE = text.replace("""\t""" , """<TAB>""" ) _SCREAMING_SNAKE_CASE = text.replace("""—""" , """ー""" ) _SCREAMING_SNAKE_CASE = text.replace("""−""" , """ー""" ) for k, v in self.emoji["emoji"].items(): if k in text: _SCREAMING_SNAKE_CASE = text.replace(A , A ) if clean: _SCREAMING_SNAKE_CASE = self.clean_text(A ) def check_simbol(A ): _SCREAMING_SNAKE_CASE = x.encode() if len(A ) == 1 and len(A ) == 2: _SCREAMING_SNAKE_CASE = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0Xc2a1 and c <= 0Xc2bf) or (c >= 0Xc780 and c <= 0Xc783) or (c >= 0Xcab9 and c <= 0Xcbbf) or (c >= 0Xcc80 and c <= 0Xcda2) ): return True 
return False def checkuae(A ): _SCREAMING_SNAKE_CASE = x.encode() if len(A ) == 1 and len(A ) == 3: _SCREAMING_SNAKE_CASE = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0Xe2_8080 and c <= 0Xe2_b07f: return True return False _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = [] while pos < len(A ): _SCREAMING_SNAKE_CASE = min(len(A ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3 _SCREAMING_SNAKE_CASE = [] # (token_id, token, pos) for e in range(A , A , -1 ): _SCREAMING_SNAKE_CASE = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(A ) > 2: _SCREAMING_SNAKE_CASE = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(A ) > 0: # the smallest token_id is adopted _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = sorted(A , key=lambda A : x[0] )[0] result.append(A ) _SCREAMING_SNAKE_CASE = e else: _SCREAMING_SNAKE_CASE = pos + 1 _SCREAMING_SNAKE_CASE = text[pos:end] if check_simbol(A ): result.append("""<KIGOU>""" ) elif checkuae(A ): result.append("""<U2000U2BFF>""" ) else: for i in wd.encode("""utf-8""" ): result.append("""<|byte%d|>""" % i ) _SCREAMING_SNAKE_CASE = end return result def snake_case_( self , A , A="\n" ) -> Dict: _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(A ) > 0: words.append(bytearray(A ).decode("""utf-8""" , errors="""replace""" ) ) _SCREAMING_SNAKE_CASE = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["""emoji_inv"""][word] ) elif word == "<SP>": words.append(""" """ ) elif word == "<BR>": words.append(A ) elif word == "<TAB>": words.append("""\t""" ) elif word == "<BLOCK>": words.append("""▀""" ) elif word == "<KIGOU>": words.append("""ǀ""" ) elif word == "<U2000U2BFF>": words.append("""‖""" ) else: words.append(A ) if len(A ) > 0: words.append(bytearray(A ).decode("""utf-8""" , errors="""replace""" ) ) _SCREAMING_SNAKE_CASE = """""".join(A ) return text
import random


class Onepad:
    """Toy one-time-pad-style cipher: each character p gets a random key k and
    is encoded as c = (p + k) * k."""

    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt ``text``; return the cipher values and the per-character keys."""
        plain = [ord(char) for char in text]
        cipher = []
        key = []
        for p in plain:
            k = random.randint(1, 300)
            cipher.append((p + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert encrypt: p = (c - k**2) / k."""
        plain = [int((cipher[i] - key[i] ** 2) / key[i]) for i in range(len(cipher))]
        return "".join(chr(p) for p in plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
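# A hedged roundtrip check for the Onepad helper above; the message below is an
# illustrative assumption, not part of the original file.
message = "attack at dawn"
cipher, key = Onepad.encrypt(message)
assert Onepad.decrypt(cipher, key) == message  # decrypt inverts c = (p + k) * k exactly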
'''simple docstring''' import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __lowerCAmelCase = '\\n Text data.\n Second line of data.' __lowerCAmelCase = 'file' @pytest.fixture(scope="""session""" ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): _snake_case = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""") _snake_case = bytes(SCREAMING_SNAKE_CASE_ , """utf-8""" ) with zstd.open(SCREAMING_SNAKE_CASE_ , """wb""" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): with open(os.path.join(tmpfs.local_root_dir , SCREAMING_SNAKE_CASE_ ) , """w""" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return FILE_PATH @pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path} _snake_case = input_paths[compression_format] _snake_case = tmp_path / """cache""" _snake_case = DownloadConfig(cache_dir=SCREAMING_SNAKE_CASE_ , extract_compressed_file=SCREAMING_SNAKE_CASE_ ) _snake_case = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ ) as f: _snake_case = f.read() with open(SCREAMING_SNAKE_CASE_ ) as f: _snake_case = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("""default_extracted""" , [True, False] ) @pytest.mark.parametrize("""default_cache_dir""" , [True, False] ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = """custom_cache""" _snake_case = """custom_extracted_dir""" _snake_case = tmp_path / """custom_extracted_path""" if default_extracted: _snake_case = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""") else: monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , SCREAMING_SNAKE_CASE_ ) monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(SCREAMING_SNAKE_CASE_ ) ) _snake_case = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _snake_case = xz_file _snake_case = ( DownloadConfig(extract_compressed_file=SCREAMING_SNAKE_CASE_ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE_ ) ) _snake_case = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ ) assert Path(SCREAMING_SNAKE_CASE_ ).parent.parts[-2:] == expected def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): _snake_case = str(Path(SCREAMING_SNAKE_CASE_ ).resolve() ) assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file # relative path _snake_case = str(Path(SCREAMING_SNAKE_CASE_ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): _snake_case = str(tmp_path.resolve() / """__missing_file__.txt""" ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path(SCREAMING_SNAKE_CASE_ ) # relative path 
_snake_case = """./__missing_file__.txt""" with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path(SCREAMING_SNAKE_CASE_ ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): _snake_case = get_from_cache(f"""tmp://{tmpfs_file}""" ) with open(SCREAMING_SNAKE_CASE_ ) as f: _snake_case = f.read() assert output_file_content == FILE_CONTENT @patch("""datasets.config.HF_DATASETS_OFFLINE""" , SCREAMING_SNAKE_CASE_ ) def __SCREAMING_SNAKE_CASE ( ): with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path("""https://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , SCREAMING_SNAKE_CASE_ ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): _snake_case = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(SCREAMING_SNAKE_CASE_ ): http_get("""https://huggingface.co""" , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): http_head("""https://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , SCREAMING_SNAKE_CASE_ ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): _snake_case = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(SCREAMING_SNAKE_CASE_ ): ftp_get("""ftp://huggingface.co""" , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): ftp_head("""ftp://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , SCREAMING_SNAKE_CASE_ ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): _snake_case = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(SCREAMING_SNAKE_CASE_ ): fsspec_get("""s3://huggingface.co""" , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): fsspec_head("""s3://huggingface.co""" )
def triangle_number_generator():
    """Yield the triangular numbers n * (n + 1) / 2."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of ``n`` via its prime factorisation: the product of
    (multiplicity + 1) over all prime factors."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors (Project Euler 12)."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
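# Quick sanity checks for count_divisors above (illustrative assumptions, not
# from the original file): 28 = 2**2 * 7 has (2 + 1) * (1 + 1) = 6 divisors.
assert count_divisors(28) == 6  # divisors: 1, 2, 4, 7, 14, 28
assert count_divisors(76576500) > 500  # 76576500 is the known Project Euler 12 answer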
import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets lowercase_ = datasets.logging.get_logger(__name__) lowercase_ = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n' lowercase_ = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n' lowercase_ = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n' lowercase_ = { 'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip', 'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip', 'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip', 'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip', 'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip', 'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip', 'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip', 'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip', 'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip', 'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip', } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def A__ ( self ) -> str: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/google-research/bleurt' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/bleurt'] , reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] , ) def A__ ( self , lowerCAmelCase ) -> Tuple: '''simple docstring''' if self.config_name == "default": logger.warning( 'Using default BLEURT-Base checkpoint for sequence maximum length 128. ' 'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').' 
) _lowercase ='bleurt-base-128' if self.config_name.lower() in CHECKPOINT_URLS: _lowercase =self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: _lowercase =self.config_name.upper() else: raise KeyError( F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' ) # download the model checkpoint specified by self.config_name and set up the scorer _lowercase =dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) _lowercase =score.BleurtScorer(os.path.join(lowerCAmelCase , lowerCAmelCase ) ) def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> str: '''simple docstring''' _lowercase =self.scorer.score(references=lowerCAmelCase , candidates=lowerCAmelCase ) return {"scores": scores}
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu

filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(
            src_sentences, return_tensors="pt", truncation=True, padding="longest"
        ).to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if len(infix) > 7 else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is alphabet/digit, add it to postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # pop stack & add the content to postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # if stack is empty, push x to stack
            else:
                # pop while the operator on the stack has priority >= x; the "("
                # guard fixes a KeyError in the original code, since "(" has no
                # entry in the priority table
                while (
                    len(stack) > 0
                    and stack[-1] != "("
                    and priority[x] <= priority[stack[-1]]
                ):
                    post_fix.append(stack.pop())  # pop stack & add to postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return postfix as str


def infix_2_prefix(infix: str) -> str:
    reversed_infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("
    # run infix_2_postfix on the reversed equation, then reverse the result
    return (infix_2_postfix("".join(reversed_infix)))[::-1]


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # input an infix equation
    Infix = "".join(Infix.split())  # remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
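# A short usage sketch for the converters above; the expression is an
# illustrative assumption (both calls also print their conversion tables).
print(infix_2_postfix("a+b*(c^d-e)"))  # -> abcd^e-*+
print(infix_2_prefix("a+b*(c^d-e)"))   # -> +a*b-^cde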
def hamming(n_element: int) -> list:
    """Return the first ``n_element`` Hamming numbers, i.e. numbers of the
    form 2**i * 3**j * 5**k, in ascending order."""
    n_element = int(n_element)
    if n_element < 1:
        raise ValueError("n_element should be a positive number")
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
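# Sanity check (illustrative, not from the original file): the first ten
# Hamming numbers.
assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]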
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCamelCase__ : Tuple = 16 lowerCamelCase__ : str = 32 def UpperCAmelCase_ ( __UpperCAmelCase : Accelerator , __UpperCAmelCase : int = 16 ) -> Tuple: SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('bert-base-cased' ) SCREAMING_SNAKE_CASE_ = load_dataset('glue' , 'mrpc' ) def tokenize_function(__UpperCAmelCase : Dict ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE_ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE_ = datasets.map( SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE_ = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(__UpperCAmelCase : Optional[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE_ = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE_ = 16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE_ = 8 else: SCREAMING_SNAKE_CASE_ = None return tokenizer.pad( SCREAMING_SNAKE_CASE_ , padding='longest' , max_length=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE_ = DataLoader( tokenized_datasets['train'] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = DataLoader( tokenized_datasets['validation'] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCamelCase__ : Dict = mocked_dataloaders # noqa: F811 def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple ) -> str: if os.environ.get('TESTING_MOCKED_DATALOADERS' , SCREAMING_SNAKE_CASE_ ) == "1": SCREAMING_SNAKE_CASE_ = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: SCREAMING_SNAKE_CASE_ = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir ) else: SCREAMING_SNAKE_CASE_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE_ = config['lr'] SCREAMING_SNAKE_CASE_ = int(config['num_epochs'] ) SCREAMING_SNAKE_CASE_ = int(config['seed'] ) SCREAMING_SNAKE_CASE_ = int(config['batch_size'] ) set_seed(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation SCREAMING_SNAKE_CASE_ = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: SCREAMING_SNAKE_CASE_ = batch_size // MAX_GPU_BATCH_SIZE SCREAMING_SNAKE_CASE_ = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE_ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE_ = model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE_ = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ ) # Instantiate scheduler SCREAMING_SNAKE_CASE_ = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=1_00 , num_training_steps=(len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # New Code # # We need to initialize the trackers we use. 
Overall configurations can also be stored if args.with_tracking: SCREAMING_SNAKE_CASE_ = os.path.split(SCREAMING_SNAKE_CASE_ )[-1].split('.' )[0] accelerator.init_trackers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Now we train the model for epoch in range(SCREAMING_SNAKE_CASE_ ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: SCREAMING_SNAKE_CASE_ = 0 for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) SCREAMING_SNAKE_CASE_ = model(**SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() SCREAMING_SNAKE_CASE_ = loss / gradient_accumulation_steps accelerator.backward(SCREAMING_SNAKE_CASE_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , ) SCREAMING_SNAKE_CASE_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:" , SCREAMING_SNAKE_CASE_ ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { 'accuracy': eval_metric['accuracy'], 'f1': eval_metric['f1'], 'train_loss': total_loss.item() / len(SCREAMING_SNAKE_CASE_ ), 'epoch': epoch, } , step=SCREAMING_SNAKE_CASE_ , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def UpperCAmelCase_ ( ) -> Optional[Any]: SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) parser.add_argument( '--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , ) parser.add_argument( '--project_dir' , type=SCREAMING_SNAKE_CASE_ , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , ) SCREAMING_SNAKE_CASE_ = parser.parse_args() SCREAMING_SNAKE_CASE_ = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
import copy import random from transformers import CLIPTokenizer class a__ ( snake_case ): """simple docstring""" def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]: '''simple docstring''' super().__init__(*lowercase , **lowercase ) A__ = {} def UpperCamelCase ( self , lowercase , *lowercase , **lowercase ) -> str: '''simple docstring''' A__ = super().add_tokens(lowercase , *lowercase , **lowercase ) if num_added_tokens == 0: raise ValueError( F'The tokenizer already contains the token {placeholder_token}. Please pass a different' " `placeholder_token` that is not already in the tokenizer." ) def UpperCamelCase ( self , lowercase , *lowercase , lowercase=1 , **lowercase ) -> Any: '''simple docstring''' A__ = [] if num_vec_per_token == 1: self.try_adding_tokens(lowercase , *lowercase , **lowercase ) output.append(lowercase ) else: A__ = [] for i in range(lowercase ): A__ = placeholder_token + F'_{i}' self.try_adding_tokens(lowercase , *lowercase , **lowercase ) output.append(lowercase ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( F'The tokenizer already has placeholder token {token} that can get confused with' F' {placeholder_token}keep placeholder tokens independent' ) A__ = output def UpperCamelCase ( self , lowercase , lowercase=False , lowercase=1.0 ) -> List[Any]: '''simple docstring''' if isinstance(lowercase , lowercase ): A__ = [] for i in range(len(lowercase ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowercase ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: A__ = self.token_map[placeholder_token] A__ = tokens[: 1 + int(len(lowercase ) * prop_tokens_to_load )] if vector_shuffle: A__ = copy.copy(lowercase ) random.shuffle(lowercase ) A__ = text.replace(lowercase , " ".join(lowercase ) ) return text def __call__( self , lowercase , *lowercase , lowercase=False , lowercase=1.0 , **lowercase ) -> str: '''simple docstring''' return super().__call__( self.replace_placeholder_tokens_in_text( lowercase , vector_shuffle=lowercase , prop_tokens_to_load=lowercase ) , *lowercase , **lowercase , ) def UpperCamelCase ( self , lowercase , *lowercase , lowercase=False , lowercase=1.0 , **lowercase ) -> List[str]: '''simple docstring''' return super().encode( self.replace_placeholder_tokens_in_text( lowercase , vector_shuffle=lowercase , prop_tokens_to_load=lowercase ) , *lowercase , **lowercase , )
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from (row, col) to the bottom-right cell of
    ``grid``, moving in the four cardinal directions over cells equal to 0
    and never revisiting a cell."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
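# A minimal usage sketch for the path-counting DFS above; the grid is an
# illustrative assumption (0 = open cell, 1 = blocked cell).
maze = [
    [0, 0, 0],
    [1, 1, 0],
    [0, 0, 0],
]
# Count simple paths from the top-left to the bottom-right corner.
print(depth_first_search(maze, 0, 0, set()))  # expected: 1 (only the route around the wall)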
from collections import deque from math import floor from random import random from time import time class a__ : """simple docstring""" def __init__( self ) -> Dict: '''simple docstring''' A__ = {} def UpperCamelCase ( self , lowercase , lowercase , lowercase=1 ) -> Tuple: '''simple docstring''' if self.graph.get(lowercase ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: A__ = [[w, v]] if not self.graph.get(lowercase ): A__ = [] def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' return list(self.graph ) def UpperCamelCase ( self , lowercase , lowercase ) -> int: '''simple docstring''' if self.graph.get(lowercase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowercase ) def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Any: '''simple docstring''' if s == d: return [] A__ = [] A__ = [] if s == -2: A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowercase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return visited def UpperCamelCase ( self , lowercase=-1 ) -> Optional[Any]: '''simple docstring''' if c == -1: A__ = floor(random() * 10000 ) + 10 for i in range(lowercase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A__ = floor(random() * c ) + 1 if n != i: self.add_pair(lowercase , lowercase , 1 ) def UpperCamelCase ( self , lowercase=-2 ) -> Any: '''simple docstring''' A__ = deque() A__ = [] if s == -2: A__ = list(self.graph )[0] d.append(lowercase ) visited.append(lowercase ) while d: A__ = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def UpperCamelCase ( self , lowercase ) -> Tuple: '''simple docstring''' A__ = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def UpperCamelCase ( self , lowercase ) -> int: '''simple docstring''' return len(self.graph[u] ) def UpperCamelCase ( self , lowercase=-2 ) -> str: '''simple docstring''' A__ = [] A__ = [] if s == -2: A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = s A__ = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return sorted_nodes def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = [] A__ = [] A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = -2 A__ = [] A__ = s A__ = False A__ = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A__ = 
len(lowercase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() A__ = True if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = False indirect_parents.append(lowercase ) A__ = s A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return list(lowercase ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = [] A__ = [] A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = -2 A__ = [] A__ = s A__ = False A__ = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A__ = len(lowercase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() A__ = True if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = False indirect_parents.append(lowercase ) A__ = s A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return False def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Any: '''simple docstring''' A__ = time() self.dfs(lowercase , lowercase ) A__ = time() return end - begin def UpperCamelCase ( self , lowercase=-2 ) -> int: '''simple docstring''' A__ = time() self.bfs(lowercase ) A__ = time() return end - begin class a__ : """simple docstring""" def __init__( self ) -> int: '''simple docstring''' A__ = {} def UpperCamelCase ( self , lowercase , lowercase , lowercase=1 ) -> Union[str, Any]: '''simple docstring''' if self.graph.get(lowercase ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist A__ = [[w, v]] # add the other way if self.graph.get(lowercase ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist A__ = [[w, u]] def UpperCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' if self.graph.get(lowercase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowercase ) # the other way round if self.graph.get(lowercase ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(lowercase ) def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> List[str]: '''simple docstring''' if s == d: return [] A__ = [] A__ = [] if s == -2: A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowercase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return visited def UpperCamelCase ( self , lowercase=-1 ) -> 
str: '''simple docstring''' if c == -1: A__ = floor(random() * 10000 ) + 10 for i in range(lowercase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A__ = floor(random() * c ) + 1 if n != i: self.add_pair(lowercase , lowercase , 1 ) def UpperCamelCase ( self , lowercase=-2 ) -> Dict: '''simple docstring''' A__ = deque() A__ = [] if s == -2: A__ = list(self.graph )[0] d.append(lowercase ) visited.append(lowercase ) while d: A__ = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def UpperCamelCase ( self , lowercase ) -> Tuple: '''simple docstring''' return len(self.graph[u] ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' A__ = [] A__ = [] A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = -2 A__ = [] A__ = s A__ = False A__ = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A__ = len(lowercase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() A__ = True if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = False indirect_parents.append(lowercase ) A__ = s A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return list(lowercase ) def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = [] A__ = [] A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = -2 A__ = [] A__ = s A__ = False A__ = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A__ = len(lowercase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() A__ = True if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = False indirect_parents.append(lowercase ) A__ = s A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return False def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' return list(self.graph ) def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Optional[Any]: '''simple docstring''' A__ = time() self.dfs(lowercase , lowercase ) A__ = time() return end - begin def UpperCamelCase ( self , lowercase=-2 ) -> List[Any]: '''simple docstring''' A__ = time() self.bfs(lowercase ) A__ = time() return end - begin
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Return the built-in voltage (in volts) of a p-n junction."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration"
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration"
        )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
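# A hedged worked example for builtin_voltage; the concentrations are
# illustrative assumptions, roughly typical for a silicon p-n junction.
v_bi = builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
print(f"Built-in voltage: {v_bi:.3f} V")  # about 0.81 V for these numbers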
import datasets

from .evaluate import evaluate


_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Contract Understanding Atticus Dataset (CUAD).

Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal
contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for
when reviewing contracts in connection with corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
          depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly matches the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric("cuad")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        # Map each question id to its list of candidate answer texts.
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # Rebuild the minimal CUAD dataset structure expected by the official scoring script.
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
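# A minimal usage sketch for the metric above (it assumes the script is loadable
# under the name "cuad" through `datasets.load_metric`; the id and texts mirror
# the docstring example).
if __name__ == "__main__":
    import datasets

    cuad_metric = datasets.load_metric("cuad")
    _id = "LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties"
    predictions = [{"prediction_text": ["The seller:"], "id": _id}]
    references = [{"answers": {"answer_start": [143], "text": ["The seller:"]}, "id": _id}]
    print(cuad_metric.compute(predictions=predictions, references=references))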
"""simple docstring""" from typing import List from .keymap import KEYMAP, get_character def lowercase_ ( __UpperCAmelCase ) -> Dict: def decorator(__UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = getattr(SCREAMING_SNAKE_CASE_ , """handle_key""" , [] ) handle += [key] setattr(SCREAMING_SNAKE_CASE_ , """handle_key""" , SCREAMING_SNAKE_CASE_ ) return func return decorator def lowercase_ ( *__UpperCAmelCase ) -> Any: def decorator(__UpperCAmelCase ): lowerCAmelCase__ : Any = getattr(SCREAMING_SNAKE_CASE_ , """handle_key""" , [] ) handle += keys setattr(SCREAMING_SNAKE_CASE_ , """handle_key""" , SCREAMING_SNAKE_CASE_ ) return func return decorator class _lowerCamelCase ( a_ ): def __new__( cls : int , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : Union[str, Any] ) -> str: """simple docstring""" lowerCAmelCase__ : Dict = super().__new__(cls , UpperCamelCase , UpperCamelCase , UpperCamelCase ) if not hasattr(UpperCamelCase , """key_handler""" ): setattr(UpperCamelCase , """key_handler""" , {} ) setattr(UpperCamelCase , """handle_input""" , KeyHandler.handle_input ) for value in attrs.values(): lowerCAmelCase__ : int = getattr(UpperCamelCase , """handle_key""" , [] ) for key in handled_keys: lowerCAmelCase__ : List[str] = value return new_cls @staticmethod def _lowerCAmelCase ( cls : Optional[int] ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : int = get_character() if char != KEYMAP["undefined"]: lowerCAmelCase__ : Union[str, Any] = ord(UpperCamelCase ) lowerCAmelCase__ : Dict = cls.key_handler.get(UpperCamelCase ) if handler: lowerCAmelCase__ : Dict = char return handler(cls ) else: return None def lowercase_ ( cls ) -> Dict: return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
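# A hedged usage sketch for the checker above (assumes network access to the
# public "CompVis/stable-diffusion-safety-checker" weights; pairing it with a
# CLIP image processor is an assumption made for illustration):
#
# import numpy as np
# from transformers import CLIPImageProcessor
#
# checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
# processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14")
# clip_input = processor(images=pil_image, return_tensors="pt").pixel_values
# images, has_nsfw = checker(clip_input=clip_input, images=np.asarray(pil_image)[None])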
import logging
import os
import threading
import time

try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the logger instance used by this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        except OSError:
            # Probably another instance of the application
            # that acquired the file lock.
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` function to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        except OSError:
            # The file is already deleted and that's what we want.
            pass
        return None


FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
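# A minimal usage sketch for the platform-appropriate `FileLock` alias above
# (the lock path is illustrative; nested `with` blocks on the same lock object
# are safe thanks to the internal lock counter):
if __name__ == "__main__":
    lock = FileLock("demo.txt.lock", timeout=5)
    with lock:
        assert lock.is_locked  # exclusive access to the guarded resource here
    assert not lock.is_locked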
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
"""simple docstring""" from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class lowercase_ : '''simple docstring''' def __init__( self : str , _UpperCAmelCase : str , ): _A = parent _A = 13 _A = 7 _A = 30 _A = self.seq_length + self.mem_len _A = 15 _A = True _A = True _A = 99 _A = [10, 50, 80] _A = 32 _A = 32 _A = 4 _A = 8 _A = 128 _A = 2 _A = 2 _A = None _A = 1 _A = 0 _A = 3 _A = self.vocab_size - 1 _A = 0.01 def lowerCAmelCase_ ( self : List[Any] ): _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_labels: _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def lowerCAmelCase_ ( self : int ): random.seed(self.seed ) tf.random.set_seed(self.seed ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ): _A = TFTransfoXLModel(_UpperCAmelCase ) _A , _A = model(_UpperCAmelCase ).to_tuple() _A = {'input_ids': input_ids_a, 'mems': mems_a} _A , _A = model(_UpperCAmelCase ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str ): _A = TFTransfoXLLMHeadModel(_UpperCAmelCase ) _A , _A = model(_UpperCAmelCase ).to_tuple() _A = {'input_ids': input_ids_a, 'labels': lm_labels} _A , _A = model(_UpperCAmelCase ).to_tuple() _A , _A = model([input_ids_a, mems_a] ).to_tuple() _A = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels} _A , _A = model(_UpperCAmelCase ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * 
self.num_hidden_layers , ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str ): _A = TFTransfoXLForSequenceClassification(_UpperCAmelCase ) _A = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = self.prepare_config_and_inputs() ((_A) , (_A) , (_A) , (_A)) = config_and_inputs _A = {'input_ids': input_ids_a} return config, inputs_dict @require_tf class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Any = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) UpperCAmelCase : int = () if is_tf_available() else () UpperCAmelCase : Optional[int] = ( { '''feature-extraction''': TFTransfoXLModel, '''text-classification''': TFTransfoXLForSequenceClassification, '''text-generation''': TFTransfoXLLMHeadModel, '''zero-shot''': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented UpperCAmelCase : Dict = False UpperCAmelCase : Optional[Any] = False UpperCAmelCase : Union[str, Any] = False UpperCAmelCase : Any = False def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. return True return False def lowerCAmelCase_ ( self : int ): _A = TFTransfoXLModelTester(self ) _A = ConfigTester(self , config_class=_UpperCAmelCase , d_embed=37 ) def lowerCAmelCase_ ( self : Tuple ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : List[Any] ): self.model_tester.set_seed() _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): self.model_tester.set_seed() _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: _A = model_class(_UpperCAmelCase ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: _A = model.get_output_embeddings() assert isinstance(_UpperCAmelCase , tf.keras.layers.Layer ) _A = model.get_bias() assert name is None else: _A = model.get_output_embeddings() assert x is None _A = model.get_bias() assert name is None def lowerCAmelCase_ ( self : Dict ): pass @slow def lowerCAmelCase_ ( self : Dict ): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = TFTransfoXLModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' 
) def lowerCAmelCase_ ( self : Dict ): pass @require_tf class lowercase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip('Skip test until #12651 is resolved.' ) @slow def lowerCAmelCase_ ( self : Dict ): _A = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' ) # fmt: off _A = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off _A = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> _A = model.generate(_UpperCAmelCase , max_length=200 , do_sample=_UpperCAmelCase ) self.assertListEqual(output_ids[0].numpy().tolist() , _UpperCAmelCase )
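# To exercise this suite in a `transformers` checkout (a sketch; the path below
# assumes the upstream repository layout):
#
#   RUN_SLOW=1 python -m pytest tests/models/transfo_xl/test_modeling_tf_transfo_xl.py -v
#
# The @slow integration test additionally downloads the `transfo-xl-wt103`
# checkpoint from the Hub.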
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the plaintext candidate for every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
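# Quick non-interactive demonstration of the brute force above: "KHOOR" is
# "HELLO" shifted by 3, so the line printed for Key #3 reads "HELLO".
#
# >>> decrypt("KHOOR")  # doctest: +SKIP (prints all 26 candidate lines)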
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) A : str = { "configuration_efficientformer": [ "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "EfficientFormerConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : str = ["EfficientFormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Union[str, Any] = [ "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "EfficientFormerForImageClassification", "EfficientFormerForImageClassificationWithTeacher", "EfficientFormerModel", "EfficientFormerPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = [ "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFEfficientFormerForImageClassification", "TFEfficientFormerForImageClassificationWithTeacher", "TFEfficientFormerModel", "TFEfficientFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys A : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""") @require_sentencepiece @require_tokenizers class a__ ( snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = SpeechTaTokenizer __lowerCamelCase = False __lowerCamelCase = True def UpperCamelCase ( self ) -> Any: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ = SpeechTaTokenizer(lowercase ) A__ = AddedToken("<mask>" , lstrip=lowercase , rstrip=lowercase ) A__ = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase ( self , lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = "this is a test" A__ = "this is a test" return input_text, output_text def UpperCamelCase ( self , lowercase , lowercase=False , lowercase=20 , lowercase=5 ) -> Optional[Any]: '''simple docstring''' A__ , A__ = self.get_input_output_texts(lowercase ) A__ = tokenizer.encode(lowercase , add_special_tokens=lowercase ) A__ = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase ) return text, ids def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = "<pad>" A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-4] , "œ" ) self.assertEqual(vocab_keys[-2] , "<mask>" ) self.assertEqual(vocab_keys[-1] , "<ctc_blank>" ) self.assertEqual(len(lowercase ) , 81 ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ = self.get_tokenizers(do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) A__ = ["aaaaa bbbbbb", "cccccccccdddddddd"] A__ = tokenizer.add_tokens(lowercase ) A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , len(lowercase ) ) self.assertEqual(lowercase , all_size + len(lowercase ) ) A__ = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=lowercase ) self.assertGreaterEqual(len(lowercase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) A__ = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} A__ = tokenizer.add_special_tokens(lowercase ) A__ = tokenizer.vocab_size A__ = len(lowercase ) self.assertNotEqual(lowercase , 0 ) self.assertEqual(lowercase , lowercase ) 
self.assertEqual(lowercase , len(lowercase ) ) self.assertEqual(lowercase , all_size_a + len(lowercase ) ) A__ = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=lowercase ) self.assertGreaterEqual(len(lowercase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' pass def UpperCamelCase ( self ) -> Any: '''simple docstring''' pass def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = self.get_tokenizer() A__ = tokenizer.tokenize("This is a test" ) # fmt: off self.assertListEqual(lowercase , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) A__ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( lowercase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) A__ = tokenizer.convert_tokens_to_ids(lowercase ) # fmt: off self.assertListEqual(lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on A__ = tokenizer.convert_ids_to_tokens(lowercase ) self.assertListEqual( lowercase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) @slow def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = [ "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides " "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural " "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained " "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.", "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox jumps over the lazy dog.", ] # fmt: off A__ = { "input_ids": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=lowercase , )
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    """
    Wrapper around `tqdm.tqdm` that optionally displays only on the main process.
    """
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Only local rank 0 should render the progress bar; every other
        # process gets a disabled (silent) instance.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
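# A minimal usage sketch (assumes `tqdm` is installed; in a multi-process
# `accelerate` launch only local rank 0 renders the bar):
#
# from accelerate.utils import tqdm
#
# for batch in tqdm(dataloader, desc="training"):
#     ...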
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: int ) -> List[str]: '''simple docstring''' A__ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] A__ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } A__ = F'{src_lang}-{tgt_lang}' A__ = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n' os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) A__ = os.path.join(SCREAMING_SNAKE_CASE_ , "README.md" ) print(F'Generating {path}' ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) # make sure we are under the root of the project lowerCAmelCase__ = Path(__file__).resolve().parent.parent.parent lowerCAmelCase__ = repo_dir / """model_cards""" for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = model_name.split("""-""") lowerCAmelCase__ = model_cards_dir / """facebook""" / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
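# Equivalent direct call for a single language pair (a sketch; the destination
# directory below is illustrative):
#
# write_model_card(repo_dir / "model_cards" / "facebook" / "wmt19-en-de", src_lang="en", tgt_lang="de")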
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """
    Create all character n-grams of the given size from `sentence`.

    >>> create_ngram("I am a boy", 2)
    ['I ', ' a', 'am', 'm ', ' a', 'a ', ' b', 'bo', 'oy']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy lowerCAmelCase__ = logging.get_logger(__name__) class a__ ( snake_case ): """simple docstring""" def __init__( self , lowercase , lowercase , lowercase , **lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = feature_size A__ = sampling_rate A__ = padding_value A__ = kwargs.pop("padding_side" , "right" ) A__ = kwargs.pop("return_attention_mask" , lowercase ) super().__init__(**lowercase ) def UpperCamelCase ( self , lowercase , lowercase = True , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , ) -> BatchFeature: '''simple docstring''' if isinstance(lowercase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): A__ = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F' to this method that includes {self.model_input_names[0]}, but you provided' F' {list(processed_features.keys() )}' ) A__ = processed_features[self.model_input_names[0]] A__ = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(lowercase ) == 0: if return_attention_mask: A__ = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch A__ = required_input[0] if isinstance(lowercase , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. A__ = 0 while len(required_input[index] ) == 0: index += 1 if index < len(lowercase ): A__ = required_input[index][0] if return_tensors is None: if is_tf_tensor(lowercase ): A__ = "tf" elif is_torch_tensor(lowercase ): A__ = "pt" elif isinstance(lowercase , (int, float, list, tuple, np.ndarray) ): A__ = "np" else: raise ValueError( F'type of {first_element} unknown: {type(lowercase )}. ' "Should be one of a python, numpy, pytorch or tensorflow object." ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): A__ = to_numpy(lowercase ) else: A__ = [to_numpy(lowercase ) for v in value] # Convert padding_strategy in PaddingStrategy A__ = self._get_padding_strategies(padding=lowercase , max_length=lowercase ) A__ = processed_features[self.model_input_names[0]] A__ = len(lowercase ) if not all(len(lowercase ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." 
) A__ = [] for i in range(lowercase ): A__ = {k: v[i] for k, v in processed_features.items()} # truncation A__ = self._truncate( lowercase , max_length=lowercase , pad_to_multiple_of=lowercase , truncation=lowercase , ) truncated_inputs.append(lowercase ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length A__ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) A__ = PaddingStrategy.MAX_LENGTH A__ = {} for i in range(lowercase ): # padding A__ = self._pad( truncated_inputs[i] , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , ) for key, value in outputs.items(): if key not in batch_outputs: A__ = [] if value.dtype is np.dtype(np.floataa ): A__ = value.astype(np.floataa ) batch_outputs[key].append(lowercase ) return BatchFeature(lowercase , tensor_type=lowercase ) def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = PaddingStrategy.DO_NOT_PAD , lowercase = None , lowercase = None , ) -> dict: '''simple docstring''' A__ = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: A__ = len(lowercase ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of A__ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase ) < max_length if return_attention_mask and "attention_mask" not in processed_features: A__ = np.ones(len(lowercase ) , dtype=np.intaa ) if needs_to_be_padded: A__ = max_length - len(lowercase ) if self.padding_side == "right": if return_attention_mask: A__ = np.pad( processed_features["attention_mask"] , (0, difference) ) A__ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) A__ = np.pad( lowercase , lowercase , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: A__ = np.pad( processed_features["attention_mask"] , (difference, 0) ) A__ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) A__ = np.pad( lowercase , lowercase , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , ) -> Union[str, Any]: '''simple docstring''' if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." 
) A__ = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of A__ = len(lowercase ) > max_length if needs_to_be_truncated: A__ = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: A__ = processed_features["attention_mask"][:max_length] return processed_features def UpperCamelCase ( self , lowercase=False , lowercase=None ) -> Any: '''simple docstring''' if padding is not False: if padding is True: A__ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(lowercase , lowercase ): A__ = PaddingStrategy(lowercase ) elif isinstance(lowercase , lowercase ): A__ = padding else: A__ = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
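# A hedged sketch of this padding machinery as exposed through a concrete
# subclass (Wav2Vec2FeatureExtractor and the 16 kHz rate are illustrative; the
# public entry point in `transformers` is `SequenceFeatureExtractor.pad`):
#
# from transformers import Wav2Vec2FeatureExtractor
#
# fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
# features = [{"input_values": [0.1, 0.2, 0.3]}, {"input_values": [0.4, 0.5]}]
# batch = fe.pad(features, padding=True, return_tensors="np")
# # batch["input_values"].shape == (2, 3); the shorter example is right-padded with 0.0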
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) def a ( A__ : Any , A__ : List[Any]=False , A__ : Union[str, Any]=False ) -> int: """simple docstring""" _lowercase ='backbone.' if is_semantic else '' _lowercase =[] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (F'''{prefix}cls_token''', 'beit.embeddings.cls_token'), (F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'), (F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'), (F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def a ( A__ : Tuple , A__ : List[Any] , A__ : List[str]=False , A__ : int=False ) -> int: """simple docstring""" for i in range(config.num_hidden_layers ): _lowercase ='backbone.' 
if is_semantic else '' # queries, keys and values _lowercase =state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' ) _lowercase =state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' ) _lowercase =state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' ) _lowercase =in_proj_weight[ : config.hidden_size, : ] _lowercase =q_bias _lowercase =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _lowercase =in_proj_weight[ -config.hidden_size :, : ] _lowercase =v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained _lowercase =state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' ) _lowercase =state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' ) _lowercase =gamma_a _lowercase =gamma_a def a ( A__ : Tuple , A__ : Any , A__ : Optional[Any] ) -> int: """simple docstring""" _lowercase =dct.pop(SCREAMING_SNAKE_CASE_ ) _lowercase =val def a ( ) -> Any: """simple docstring""" _lowercase ='http://images.cocodataset.org/val2017/000000039769.jpg' _lowercase =Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def a ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : str=False ) -> List[Any]: """simple docstring""" _lowercase =False if 'rvlcdip' in checkpoint_url else True _lowercase =BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE_ , use_mask_token=SCREAMING_SNAKE_CASE_ ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: _lowercase =1024 _lowercase =4096 _lowercase =24 _lowercase =16 # labels if "rvlcdip" in checkpoint_url: _lowercase =16 _lowercase ='huggingface/label-files' _lowercase ='rvlcdip-id2label.json' _lowercase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) , 'r' ) ) _lowercase ={int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} _lowercase =idalabel _lowercase ={v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys _lowercase =torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location='cpu' )['model'] _lowercase =create_rename_keys(SCREAMING_SNAKE_CASE_ , has_lm_head=SCREAMING_SNAKE_CASE_ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , has_lm_head=SCREAMING_SNAKE_CASE_ ) # load HuggingFace model _lowercase =BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE_ ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE_ ) model.eval() model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # Check outputs on an image _lowercase =BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE_ ) _lowercase =prepare_img() _lowercase =image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) _lowercase =encoding['pixel_values'] _lowercase =model(SCREAMING_SNAKE_CASE_ ) _lowercase =outputs.logits # verify logits _lowercase =[1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE_ ), "Shape of logits not as expected" Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: if has_lm_head: _lowercase 
='dit-base' if 'base' in checkpoint_url else 'dit-large' else: _lowercase ='dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub( repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=SCREAMING_SNAKE_CASE_ , ) model.push_to_hub( repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=SCREAMING_SNAKE_CASE_ , ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument( '--checkpoint_url', default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth', type=str, help='URL to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', ) lowercase_ = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
205
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
68
0
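The DiT-to-BEiT conversion entry above is built around a small `rename_key` helper that moves each tensor from its timm-style name to the Hugging Face name. A self-contained sketch with plain dicts standing in for a real `state_dict` (the key names are illustrative):

# Toy stand-ins for checkpoint tensors; real values would be torch.Tensor.
state_dict = {"blocks.0.norm1.weight": [1.0], "cls_token": [0.0]}

def rename_key(dct, old, new):
    # Pop the entry under its original key and re-insert it under the new key,
    # which is all the conversion script's helper does.
    dct[new] = dct.pop(old)

rename_key(state_dict, "blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight")
print(sorted(state_dict))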
"""simple docstring""" import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow A: Tuple = False class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=32 ) -> Union[str, Any]: '''simple docstring''' set_seed(0 ) UpperCAmelCase : Dict = UNetaDModel(sample_size=_SCREAMING_SNAKE_CASE , in_channels=3 , out_channels=3 ) UpperCAmelCase : str = torch.optim.SGD(model.parameters() , lr=0.0001 ) return model, optimizer @slow def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' UpperCAmelCase : Any = """cpu""" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable UpperCAmelCase : Tuple = DDPMScheduler( num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=_SCREAMING_SNAKE_CASE , ) UpperCAmelCase : str = DDIMScheduler( num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=_SCREAMING_SNAKE_CASE , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) UpperCAmelCase : Tuple = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(_SCREAMING_SNAKE_CASE ) for _ in range(4 )] UpperCAmelCase : Optional[Any] = [torch.randn((4, 3, 32, 32) ).to(_SCREAMING_SNAKE_CASE ) for _ in range(4 )] UpperCAmelCase : str = [torch.randint(0 , 1000 , (4,) ).long().to(_SCREAMING_SNAKE_CASE ) for _ in range(4 )] # train with a DDPM scheduler UpperCAmelCase , UpperCAmelCase : List[Any] = self.get_model_optimizer(resolution=32 ) model.train().to(_SCREAMING_SNAKE_CASE ) for i in range(4 ): optimizer.zero_grad() UpperCAmelCase : List[str] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) UpperCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , timesteps[i] ).sample UpperCAmelCase : Dict = torch.nn.functional.mse_loss(_SCREAMING_SNAKE_CASE , noise[i] ) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM UpperCAmelCase , UpperCAmelCase : List[Any] = self.get_model_optimizer(resolution=32 ) model.train().to(_SCREAMING_SNAKE_CASE ) for i in range(4 ): optimizer.zero_grad() UpperCAmelCase : List[Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) UpperCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , timesteps[i] ).sample UpperCAmelCase : List[Any] = torch.nn.functional.mse_loss(_SCREAMING_SNAKE_CASE , noise[i] ) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-5 ) ) self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-5 ) )
109
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    """Configuration class storing the hyperparameters of a GPT-NeoX-Japanese model."""

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
68
0
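The configuration entry above wires every hyperparameter through keyword arguments with the defaults shown. A hedged usage sketch, assuming a transformers installation that ships `GPTNeoXJapaneseConfig`:

from transformers import GPTNeoXJapaneseConfig

# Unspecified arguments fall back to the defaults in the class definition.
config = GPTNeoXJapaneseConfig(hidden_size=2560, num_hidden_layers=32)
print(config.rotary_emb_base, config.use_cache)  # 10000 True, per the defaults above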
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Any ): SCREAMING_SNAKE_CASE_ = [[1, 2, 4], [1, 2, 3, 4]] SCREAMING_SNAKE_CASE_ = DisjunctiveConstraint(_lowerCAmelCase ) self.assertTrue(isinstance(dc.token_ids , _lowerCAmelCase ) ) with self.assertRaises(_lowerCAmelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_lowerCAmelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def lowerCAmelCase_ ( self : Optional[int] ): SCREAMING_SNAKE_CASE_ = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_lowerCAmelCase ): DisjunctiveConstraint(_lowerCAmelCase ) # fails here def lowerCAmelCase_ ( self : str ): SCREAMING_SNAKE_CASE_ = [[1, 2, 3], [1, 2, 4]] SCREAMING_SNAKE_CASE_ = DisjunctiveConstraint(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = dc.update(1 ) SCREAMING_SNAKE_CASE_ = stepped is True and completed is False and reset is False self.assertTrue(_lowerCAmelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = dc.update(2 ) SCREAMING_SNAKE_CASE_ = stepped is True and completed is False and reset is False self.assertTrue(_lowerCAmelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = dc.update(3 ) SCREAMING_SNAKE_CASE_ = stepped is True and completed is True and reset is False self.assertTrue(_lowerCAmelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def lowerCAmelCase_ ( self : Tuple ): SCREAMING_SNAKE_CASE_ = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] SCREAMING_SNAKE_CASE_ = DisjunctiveConstraint(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
225
import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable) -> Callable:
    """Decorator flagging a callable as experimental API: warn, then delegate."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
68
0
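Calling a function wrapped by the decorator in the entry above emits a `UserWarning` before delegating to the original. A short usage sketch (it assumes the decorator is bound to the name `experimental`, as in the cleaned-up definition above):

import warnings

@experimental
def answer() -> int:
    return 42

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(answer())        # 42
print(caught[0].category)  # <class 'UserWarning'>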
"""simple docstring""" import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values __A : Tuple = argparse.ArgumentParser() parser.add_argument("--user", type=str, default="ubuntu") parser.add_argument("--host", type=str, default="localhost") parser.add_argument("--key_path", type=str, default=None) parser.add_argument("--instance", type=str, default="V100:1") parser.add_argument("--provider", type=str, default="cheapest") parser.add_argument("--use_spot", type=bool, default=False) parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py") __A , __A : Union[str, Any] = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError("Cannot specify both BYO and on-demand cluster args") __A : str = rh.cluster( name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path} ) else: __A : str = rh.cluster( name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) __A : str = args.example.rsplit("/", 1)[0] # Set up remote environment cluster.install_packages(["pip:./"]) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt''']) cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"]) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}''']) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
260
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) lowerCAmelCase__ = """\ Text data. Second line of data.""" lowerCAmelCase__ = """file""" @pytest.fixture(scope="session" ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> Optional[int]: '''simple docstring''' A__ = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd") A__ = bytes(SCREAMING_SNAKE_CASE_ , "utf-8" ) with zstd.open(SCREAMING_SNAKE_CASE_ , "wb" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any ) -> List[str]: '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , SCREAMING_SNAKE_CASE_ ) , "w" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return FILE_PATH @pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: int ) -> Any: '''simple docstring''' A__ = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path} A__ = input_paths[compression_format] A__ = tmp_path / "cache" A__ = DownloadConfig(cache_dir=SCREAMING_SNAKE_CASE_ , extract_compressed_file=SCREAMING_SNAKE_CASE_ ) A__ = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ ) as f: A__ = f.read() with open(SCREAMING_SNAKE_CASE_ ) as f: A__ = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("default_extracted" , [True, False] ) @pytest.mark.parametrize("default_cache_dir" , [True, False] ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: str ) -> Dict: '''simple docstring''' A__ = "custom_cache" A__ = "custom_extracted_dir" A__ = tmp_path / "custom_extracted_path" if default_extracted: A__ = ("downloads" if default_cache_dir else custom_cache_dir, "extracted") else: monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , SCREAMING_SNAKE_CASE_ ) monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(SCREAMING_SNAKE_CASE_ ) ) A__ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) A__ = xz_file A__ = ( DownloadConfig(extract_compressed_file=SCREAMING_SNAKE_CASE_ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE_ ) ) A__ = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ ) assert Path(SCREAMING_SNAKE_CASE_ ).parent.parts[-2:] == expected def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> Optional[int]: '''simple docstring''' A__ = str(Path(SCREAMING_SNAKE_CASE_ ).resolve() ) assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file # relative path A__ = str(Path(SCREAMING_SNAKE_CASE_ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[str]: '''simple docstring''' A__ = str(tmp_path.resolve() / "__missing_file__.txt" ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): 
cached_path(SCREAMING_SNAKE_CASE_ ) # relative path A__ = "./__missing_file__.txt" with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> Union[str, Any]: '''simple docstring''' A__ = get_from_cache(F'tmp://{tmpfs_file}' ) with open(SCREAMING_SNAKE_CASE_ ) as f: A__ = f.read() assert output_file_content == FILE_CONTENT @patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( ) -> List[Any]: '''simple docstring''' with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> int: '''simple docstring''' A__ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(SCREAMING_SNAKE_CASE_ ): http_get("https://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): http_head("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[Any]: '''simple docstring''' A__ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(SCREAMING_SNAKE_CASE_ ): ftp_get("ftp://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): ftp_head("ftp://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> str: '''simple docstring''' A__ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(SCREAMING_SNAKE_CASE_ ): fsspec_get("s3://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): fsspec_head("s3://huggingface.co" )
68
0
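The `cached_path` tests above exercise one invariant worth isolating: an existing local path is returned unchanged rather than copied into the cache. A minimal sketch, assuming the `datasets` library is installed:

import tempfile
from datasets.utils.file_utils import cached_path

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("Text data.")
# For a path that already exists on disk, cached_path is the identity.
print(cached_path(f.name) == f.name)  # True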
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main() -> None:
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
67
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class a__ : """simple docstring""" __lowerCamelCase = BlenderbotSmallConfig __lowerCamelCase = {} __lowerCamelCase = 'gelu' def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = eos_token_id A__ = pad_token_id A__ = bos_token_id def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A__ = tf.concat([input_ids, eos_tensor] , axis=1 ) A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A__ = prepare_blenderbot_small_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def UpperCamelCase ( self , lowercase , lowercase ) -> str: '''simple docstring''' A__ = TFBlenderbotSmallModel(config=lowercase ).get_decoder() A__ = inputs_dict["input_ids"] A__ = input_ids[:1, :] A__ = inputs_dict["attention_mask"][:1, :] A__ = inputs_dict["head_mask"] A__ = 1 # first forward pass A__ = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase ) A__ , A__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A__ = tf.concat([input_ids, next_tokens] , axis=-1 ) A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A__ = model(lowercase , attention_mask=lowercase )[0] A__ = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A__ = output_from_no_past[:, -3:, 
random_slice_idx] A__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1e-3 ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Optional[Any]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , SCREAMING_SNAKE_CASE_: Optional[int]=None , SCREAMING_SNAKE_CASE_: Dict=None , SCREAMING_SNAKE_CASE_: List[str]=None , ) -> List[Any]: '''simple docstring''' if attention_mask is None: A__ = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class a__ ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) __lowerCamelCase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __lowerCamelCase = ( { 'conversational': TFBlenderbotSmallForConditionalGeneration, 'feature-extraction': TFBlenderbotSmallModel, 'summarization': TFBlenderbotSmallForConditionalGeneration, 'text2text-generation': TFBlenderbotSmallForConditionalGeneration, 'translation': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = False def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = TFBlenderbotSmallModelTester(self ) A__ = ConfigTester(self , config_class=lowercase ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) @require_tokenizers @require_tf class a__ ( unittest.TestCase ): """simple docstring""" __lowerCamelCase = [ 'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ' ' i\'m going to throw up.\nand why is that?' 
] __lowerCamelCase = 'facebook/blenderbot_small-90M' @cached_property def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) @cached_property def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.tokenizer(self.src_text , return_tensors="tf" ) A__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , ) A__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
68
0
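The benchmark entry above is normally driven from the command line. A hedged programmatic equivalent (the argument names follow the public `TensorFlowBenchmarkArguments` API; running it requires TensorFlow and downloads the model):

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
)
benchmark = TensorFlowBenchmark(args=args)
# results = benchmark.run()  # heavy: uncomment to execute the timing loop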
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class _lowerCamelCase ( a_ , a_ , a_ , unittest.TestCase ): _lowerCamelCase :str = StableUnCLIPPipeline _lowerCamelCase :str = TEXT_TO_IMAGE_PARAMS _lowerCamelCase :Dict = TEXT_TO_IMAGE_BATCH_PARAMS _lowerCamelCase :Any = TEXT_TO_IMAGE_IMAGE_PARAMS _lowerCamelCase :Any = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _lowerCamelCase :Tuple = False def _lowerCAmelCase ( self : int ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase__ : int = 32 lowerCAmelCase__ : List[Any] = embedder_hidden_size # prior components torch.manual_seed(0 ) lowerCAmelCase__ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) lowerCAmelCase__ : Dict = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase , projection_dim=UpperCamelCase , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) lowerCAmelCase__ : Tuple = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=UpperCamelCase , num_layers=1 , ) torch.manual_seed(0 ) lowerCAmelCase__ : Dict = DDPMScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=10_00 , clip_sample=UpperCamelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , ) # regular denoising components torch.manual_seed(0 ) lowerCAmelCase__ : str = StableUnCLIPImageNormalizer(embedding_dim=UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) lowerCAmelCase__ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) lowerCAmelCase__ : Union[str, Any] = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) lowerCAmelCase__ : Optional[Any] = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCamelCase , layers_per_block=1 , upcast_attention=UpperCamelCase , use_linear_projection=UpperCamelCase , ) torch.manual_seed(0 ) lowerCAmelCase__ : Any = DDIMScheduler( 
beta_schedule="""scaled_linear""" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=UpperCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) lowerCAmelCase__ : Optional[int] = AutoencoderKL() lowerCAmelCase__ : Tuple = { # prior components """prior_tokenizer""": prior_tokenizer, """prior_text_encoder""": prior_text_encoder, """prior""": prior, """prior_scheduler""": prior_scheduler, # image noising components """image_normalizer""": image_normalizer, """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder, """unet""": unet, """scheduler""": scheduler, """vae""": vae, } return components def _lowerCAmelCase ( self : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int]=0 ) -> Optional[int]: """simple docstring""" if str(UpperCamelCase ).startswith("""mps""" ): lowerCAmelCase__ : str = torch.manual_seed(UpperCamelCase ) else: lowerCAmelCase__ : Optional[Any] = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """prior_num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def _lowerCAmelCase ( self : str ) -> str: """simple docstring""" lowerCAmelCase__ : Any = torch_device == """cpu""" self._test_attention_slicing_forward_pass(test_max_difference=UpperCamelCase ) def _lowerCAmelCase ( self : Dict ) -> List[str]: """simple docstring""" lowerCAmelCase__ : Tuple = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=UpperCamelCase ) @slow @require_torch_gpu class _lowerCamelCase ( unittest.TestCase ): def _lowerCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self : int ) -> List[str]: """simple docstring""" lowerCAmelCase__ : Optional[int] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" ) lowerCAmelCase__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCAmelCase__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCAmelCase__ : List[str] = pipe("""anime turle""" , generator=UpperCamelCase , output_type="""np""" ) lowerCAmelCase__ : Optional[Any] = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase ) def _lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCAmelCase__ : Tuple = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) lowerCAmelCase__ : Optional[Any] = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCAmelCase__ : str = pipe( """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , ) 
lowerCAmelCase__ : Tuple = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
242
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase__ = logging.get_logger(__name__) class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = ['pixel_values'] def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ) -> None: '''simple docstring''' super().__init__(**lowercase ) A__ = size if size is not None else {"height": 384, "width": 384} A__ = get_size_dict(lowercase , default_to_square=lowercase ) A__ = do_resize A__ = size A__ = resample A__ = do_rescale A__ = rescale_factor A__ = do_normalize A__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A__ = image_std if image_std is not None else OPENAI_CLIP_STD A__ = do_convert_rgb def UpperCamelCase ( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ) -> np.ndarray: '''simple docstring''' A__ = get_size_dict(lowercase , default_to_square=lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' ) A__ = (size["height"], size["width"]) return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase ) def UpperCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> Optional[Any]: '''simple docstring''' return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray: '''simple docstring''' return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase ) def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> PIL.Image.Image: '''simple docstring''' A__ = do_resize if do_resize is not None else self.do_resize A__ = resample if resample is not None else self.resample A__ = do_rescale if do_rescale is not None else self.do_rescale A__ = rescale_factor if rescale_factor is not None else self.rescale_factor A__ = do_normalize if do_normalize is not None else self.do_normalize A__ = image_mean if image_mean is not None else self.image_mean A__ = image_std if image_std is not None else self.image_std A__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A__ = size if size is not None else self.size A__ = get_size_dict(lowercase , default_to_square=lowercase ) A__ = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: A__ = [convert_to_rgb(lowercase ) for image in images] # All transformations expect numpy arrays. A__ = [to_numpy_array(lowercase ) for image in images] if do_resize: A__ = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images] if do_rescale: A__ = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_normalize: A__ = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images] A__ = [to_channel_dimension_format(lowercase , lowercase ) for image in images] A__ = BatchFeature(data={"pixel_values": images} , tensor_type=lowercase ) return encoded_outputs
68
0
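The image-processor entry above applies rescale-then-normalize using the OpenAI CLIP channel statistics. A plain-numpy sketch of those two steps (the mean/std constants are the published CLIP values; treat them as an assumption here):

import numpy as np

OPENAI_CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
OPENAI_CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])

pixels = np.random.randint(0, 256, (384, 384, 3)).astype(np.float32)
pixels = pixels * (1 / 255)                              # rescale step
pixels = (pixels - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD   # per-channel normalize step
print(pixels.shape, float(pixels.min()) >= -2.0)         # (384, 384, 3) True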
from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy _lowerCamelCase =logging.get_logger(__name__) class A__ ( __SCREAMING_SNAKE_CASE): def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ): lowerCamelCase : Optional[Any] = feature_size lowerCamelCase : Optional[int] = sampling_rate lowerCamelCase : int = padding_value lowerCamelCase : List[Any] = kwargs.pop("""padding_side""" , """right""" ) lowerCamelCase : Tuple = kwargs.pop("""return_attention_mask""" , __magic_name__ ) super().__init__(**__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = True , __magic_name__ = None , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , ): if isinstance(__magic_name__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): lowerCamelCase : List[str] = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( """You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`""" F''' to this method that includes {self.model_input_names[0]}, but you provided''' F''' {list(processed_features.keys() )}''' ) lowerCamelCase : Optional[int] = processed_features[self.model_input_names[0]] lowerCamelCase : Optional[Any] = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(__magic_name__ ) == 0: if return_attention_mask: lowerCamelCase : Optional[Any] = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch lowerCamelCase : int = required_input[0] if isinstance(__magic_name__ , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. lowerCamelCase : List[Any] = 0 while len(required_input[index] ) == 0: index += 1 if index < len(__magic_name__ ): lowerCamelCase : int = required_input[index][0] if return_tensors is None: if is_tf_tensor(__magic_name__ ): lowerCamelCase : Optional[Any] = """tf""" elif is_torch_tensor(__magic_name__ ): lowerCamelCase : List[str] = """pt""" elif isinstance(__magic_name__ , (int, float, list, tuple, np.ndarray) ): lowerCamelCase : Tuple = """np""" else: raise ValueError( F'''type of {first_element} unknown: {type(__magic_name__ )}. 
''' """Should be one of a python, numpy, pytorch or tensorflow object.""" ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): lowerCamelCase : List[str] = to_numpy(__magic_name__ ) else: lowerCamelCase : List[Any] = [to_numpy(__magic_name__ ) for v in value] # Convert padding_strategy in PaddingStrategy lowerCamelCase : Optional[Any] = self._get_padding_strategies(padding=__magic_name__ , max_length=__magic_name__ ) lowerCamelCase : Tuple = processed_features[self.model_input_names[0]] lowerCamelCase : Tuple = len(__magic_name__ ) if not all(len(__magic_name__ ) == batch_size for v in processed_features.values() ): raise ValueError("""Some items in the output dictionary have a different batch size than others.""" ) lowerCamelCase : Any = [] for i in range(__magic_name__ ): lowerCamelCase : List[Any] = {k: v[i] for k, v in processed_features.items()} # truncation lowerCamelCase : List[Any] = self._truncate( __magic_name__ , max_length=__magic_name__ , pad_to_multiple_of=__magic_name__ , truncation=__magic_name__ , ) truncated_inputs.append(__magic_name__ ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length lowerCamelCase : Dict = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) lowerCamelCase : List[Any] = PaddingStrategy.MAX_LENGTH lowerCamelCase : Any = {} for i in range(__magic_name__ ): # padding lowerCamelCase : Any = self._pad( truncated_inputs[i] , max_length=__magic_name__ , padding_strategy=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_attention_mask=__magic_name__ , ) for key, value in outputs.items(): if key not in batch_outputs: lowerCamelCase : List[Any] = [] if value.dtype is np.dtype(np.floataa ): lowerCamelCase : Optional[Any] = value.astype(np.floataa ) batch_outputs[key].append(__magic_name__ ) return BatchFeature(__magic_name__ , tensor_type=__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = PaddingStrategy.DO_NOT_PAD , __magic_name__ = None , __magic_name__ = None , ): lowerCamelCase : str = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: lowerCamelCase : Tuple = len(__magic_name__ ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): lowerCamelCase : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of lowerCamelCase : int = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__magic_name__ ) < max_length if return_attention_mask and "attention_mask" not in processed_features: lowerCamelCase : Optional[int] = np.ones(len(__magic_name__ ) , dtype=np.intaa ) if needs_to_be_padded: lowerCamelCase : Optional[Any] = max_length - len(__magic_name__ ) if self.padding_side == "right": if return_attention_mask: lowerCamelCase : Optional[int] = np.pad( processed_features["""attention_mask"""] , (0, difference) ) lowerCamelCase : str = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) lowerCamelCase : Any = np.pad( __magic_name__ , __magic_name__ , """constant""" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: lowerCamelCase : Any = np.pad( processed_features["""attention_mask"""] , (difference, 0) ) lowerCamelCase : Union[str, Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) lowerCamelCase : Any = np.pad( __magic_name__ , 
__magic_name__ , """constant""" , constant_values=self.padding_value ) else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return processed_features def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , ): if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" ) lowerCamelCase : Optional[Any] = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): lowerCamelCase : str = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of lowerCamelCase : List[str] = len(__magic_name__ ) > max_length if needs_to_be_truncated: lowerCamelCase : Any = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: lowerCamelCase : Union[str, Any] = processed_features["""attention_mask"""][:max_length] return processed_features def UpperCamelCase__ ( self , __magic_name__=False , __magic_name__=None ): if padding is not False: if padding is True: lowerCamelCase : Tuple = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(__magic_name__ , __magic_name__ ): lowerCamelCase : str = PaddingStrategy(__magic_name__ ) elif isinstance(__magic_name__ , __magic_name__ ): lowerCamelCase : Optional[int] = padding else: lowerCamelCase : List[Any] = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( """Asking to pad but the feature_extractor does not have a padding value. Please select a value to use""" """ as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" ) return padding_strategy
287
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowerCAmelCase__ = """hf-internal-testing/tiny-random-bert""" lowerCAmelCase__ = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""") lowerCAmelCase__ = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6""" class a__ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = cached_file(lowercase , lowercase ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(lowercase ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(lowercase , lowercase ) ) ) with open(os.path.join(lowercase , "refs" , "main" ) ) as f: A__ = f.read() self.assertEqual(lowercase , os.path.join(lowercase , "snapshots" , lowercase , lowercase ) ) self.assertTrue(os.path.isfile(lowercase ) ) # File is cached at the same place the second time. A__ = cached_file(lowercase , lowercase ) self.assertEqual(lowercase , lowercase ) # Using a specific revision to test the full commit hash. A__ = cached_file(lowercase , lowercase , revision="9b8c223" ) self.assertEqual(lowercase , os.path.join(lowercase , "snapshots" , lowercase , lowercase ) ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' with self.assertRaisesRegex(lowercase , "is not a valid model identifier" ): A__ = cached_file("tiny-random-bert" , lowercase ) with self.assertRaisesRegex(lowercase , "is not a valid git identifier" ): A__ = cached_file(lowercase , lowercase , revision="aaaa" ) with self.assertRaisesRegex(lowercase , "does not appear to have a file named" ): A__ = cached_file(lowercase , "conf" ) def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' with self.assertRaisesRegex(lowercase , "does not appear to have a file named" ): A__ = cached_file(lowercase , "conf" ) with open(os.path.join(lowercase , "refs" , "main" ) ) as f: A__ = f.read() self.assertTrue(os.path.isfile(os.path.join(lowercase , ".no_exist" , lowercase , "conf" ) ) ) A__ = cached_file(lowercase , "conf" , _raise_exceptions_for_missing_entries=lowercase ) self.assertIsNone(lowercase ) A__ = cached_file(lowercase , "conf" , local_files_only=lowercase , _raise_exceptions_for_missing_entries=lowercase ) self.assertIsNone(lowercase ) A__ = mock.Mock() A__ = 500 A__ = {} A__ = HTTPError A__ = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" , return_value=lowercase ) as mock_head: A__ = cached_file(lowercase , "conf" , _raise_exceptions_for_connection_errors=lowercase ) self.assertIsNone(lowercase ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , lowercase ) ) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowercase ) ) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowercase ) ) def UpperCamelCase ( self ) -> str: '''simple docstring''' self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) ) # The function raises if the repository does not exist. 
with self.assertRaisesRegex(lowercase , "is not a valid model identifier" ): get_file_from_repo("bert-base-case" , lowercase ) # The function raises if the revision does not exist. with self.assertRaisesRegex(lowercase , "is not a valid git identifier" ): get_file_from_repo("bert-base-cased" , lowercase , revision="ahaha" ) A__ = get_file_from_repo("bert-base-cased" , lowercase ) # The name is the cached name which is not very easy to test, so instead we load the content. A__ = json.loads(open(lowercase , "r" ).read() ) self.assertEqual(config["hidden_size"] , 768 ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: A__ = Path(lowercase ) / "a.txt" filename.touch() self.assertEqual(get_file_from_repo(lowercase , "a.txt" ) , str(lowercase ) ) self.assertIsNone(get_file_from_repo(lowercase , "b.txt" ) )
68
0
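The right-side `_pad` branch in the entry above pads the feature sequence with `padding_value` and extends the attention mask with zeros. A minimal numpy sketch of exactly that branch:

import numpy as np

seq = np.array([0.1, 0.2, 0.3])
max_length, padding_value = 6, 0.0
attention_mask = np.ones(len(seq), dtype=np.int32)

difference = max_length - len(seq)
padded = np.pad(seq, (0, difference), "constant", constant_values=padding_value)
mask = np.pad(attention_mask, (0, difference))  # np.pad defaults to zero-fill
print(padded)  # [0.1 0.2 0.3 0.  0.  0. ]
print(mask)    # [1 1 1 0 0 0]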
"""simple docstring""" import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''') # TF training parameters a = False a = False def _snake_case ( _snake_case : Namespace ) -> List[str]: '''simple docstring''' return TrainCommand(SCREAMING_SNAKE_CASE_ ) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' @staticmethod def lowerCAmelCase_ ( _UpperCAmelCase : List[str] ): _A = parser.add_parser('train' , help='CLI tool to train a model on a task.' ) train_parser.add_argument( '--train_data' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , ) train_parser.add_argument( '--column_label' , type=_UpperCAmelCase , default=0 , help='Column of the dataset csv file with example labels.' ) train_parser.add_argument( '--column_text' , type=_UpperCAmelCase , default=1 , help='Column of the dataset csv file with example texts.' ) train_parser.add_argument( '--column_id' , type=_UpperCAmelCase , default=2 , help='Column of the dataset csv file with example ids.' ) train_parser.add_argument( '--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' ) train_parser.add_argument('--validation_data' , type=_UpperCAmelCase , default='' , help='path to validation dataset.' ) train_parser.add_argument( '--validation_split' , type=_UpperCAmelCase , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , ) train_parser.add_argument('--output' , type=_UpperCAmelCase , default='./' , help='path to saved the trained model.' ) train_parser.add_argument( '--task' , type=_UpperCAmelCase , default='text_classification' , help='Task to train the model on.' ) train_parser.add_argument( '--model' , type=_UpperCAmelCase , default='bert-base-uncased' , help='Model\'s name or path to stored model.' ) train_parser.add_argument('--train_batch_size' , type=_UpperCAmelCase , default=32 , help='Batch size for training.' ) train_parser.add_argument('--valid_batch_size' , type=_UpperCAmelCase , default=64 , help='Batch size for validation.' ) train_parser.add_argument('--learning_rate' , type=_UpperCAmelCase , default=3E-5 , help='Learning rate.' ) train_parser.add_argument('--adam_epsilon' , type=_UpperCAmelCase , default=1E-0_8 , help='Epsilon for Adam optimizer.' 
) train_parser.set_defaults(func=_UpperCAmelCase ) def __init__( self : Any , _UpperCAmelCase : Dict ): _A = logging.get_logger('transformers-cli/training' ) _A = 'tf' if is_tf_available() else 'torch' os.makedirs(args.output , exist_ok=_UpperCAmelCase ) _A = args.output _A = args.column_label _A = args.column_text _A = args.column_id self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' ) if args.task == "text_classification": _A = TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(F'''Loading dataset from {args.train_data}''' ) _A = Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) _A = None if args.validation_data: self.logger.info(F'''Loading validation dataset from {args.validation_data}''' ) _A = Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) _A = args.validation_split _A = args.train_batch_size _A = args.valid_batch_size _A = args.learning_rate _A = args.adam_epsilon def lowerCAmelCase_ ( self : Optional[Any] ): if self.framework == "tf": return self.run_tf() return self.run_torch() def lowerCAmelCase_ ( self : List[str] ): raise NotImplementedError def lowerCAmelCase_ ( self : Tuple ): self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
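A sketch of how this command is wired up and invoked. The class name TrainCommand is confirmed by the factory above, but register_subcommand and run are assumed to be the upstream names of the two obfuscated methods; the CSV path and output directory are placeholders.

from argparse import ArgumentParser

parser = ArgumentParser("Transformers CLI tool")
subparsers = parser.add_subparsers(help="transformers-cli command helpers")
TrainCommand.register_subcommand(subparsers)  # assumed upstream name of the registration hook
args = parser.parse_args(
    ["train", "--train_data", "train.csv", "--model", "bert-base-uncased", "--output", "./out"]
)
# set_defaults(func=...) above makes args.func the command factory.
args.func(args).run()  # run() is the assumed upstream dispatch to run_tf()/run_torch()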
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class a__ ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = AutoencoderKL __lowerCamelCase = 'sample' __lowerCamelCase = 1e-2 @property def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = 4 A__ = 3 A__ = (32, 32) A__ = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase ) return {"sample": image} @property def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' return (3, 32, 32) @property def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' return (3, 32, 32) def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } A__ = self.dummy_input return init_dict, inputs_dict def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' pass def UpperCamelCase ( self ) -> Any: '''simple docstring''' pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ , A__ = self.prepare_init_args_and_inputs_for_common() A__ = self.model_class(**lowercase ) model.to(lowercase ) assert not model.is_gradient_checkpointing and model.training A__ = model(**lowercase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() A__ = torch.randn_like(lowercase ) A__ = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing A__ = self.model_class(**lowercase ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(lowercase ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training A__ = model_a(**lowercase ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() A__ = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) A__ = dict(model.named_parameters() ) A__ = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ , A__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(lowercase ) A__ = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) A__ = model.to(lowercase ) model.eval() if torch_device == "mps": A__ = torch.manual_seed(0 ) else: A__ = torch.Generator(device=lowercase ).manual_seed(0 ) A__ = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) A__ = image.to(lowercase ) with torch.no_grad(): A__ = model(lowercase , sample_posterior=lowercase , generator=lowercase ).sample A__ = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": A__ = torch.tensor( [ -4.00_78e-01, -3.83_23e-04, -1.26_81e-01, -1.14_62e-01, 2.00_95e-01, 1.08_93e-01, -8.82_47e-02, -3.03_61e-01, -9.86_44e-03, ] ) elif torch_device == "cpu": A__ = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: A__ = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(lowercase , lowercase , rtol=1e-2 ) ) @slow class a__ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self , lowercase , lowercase ) -> str: '''simple docstring''' return F'gaussian_noise_s={seed}_shape={"_".join([str(lowercase ) for s in shape] )}.npy' def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self , lowercase=0 , lowercase=(4, 3, 512, 512) , lowercase=False ) -> Optional[int]: '''simple docstring''' A__ = torch.floataa if fpaa else torch.floataa A__ = torch.from_numpy(load_hf_numpy(self.get_file_format(lowercase , lowercase ) ) ).to(lowercase ).to(lowercase ) return image def UpperCamelCase ( self , lowercase="CompVis/stable-diffusion-v1-4" , lowercase=False ) -> Any: '''simple docstring''' A__ = "fp16" if fpaa else None A__ = torch.floataa if fpaa else torch.floataa A__ = AutoencoderKL.from_pretrained( lowercase , subfolder="vae" , torch_dtype=lowercase , revision=lowercase , ) model.to(lowercase ).eval() return model def UpperCamelCase ( self , lowercase=0 ) -> List[str]: '''simple docstring''' if torch_device == "mps": return torch.manual_seed(lowercase ) return torch.Generator(device=lowercase ).manual_seed(lowercase ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, 
-0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> int: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase ) A__ = self.get_generator(lowercase ) with torch.no_grad(): A__ = model(lowercase , generator=lowercase , sample_posterior=lowercase ).sample assert sample.shape == image.shape A__ = sample[-1, -2:, -2:, :2].flatten().float().cpu() A__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(lowercase , lowercase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase ( self , lowercase , lowercase ) -> List[Any]: '''simple docstring''' A__ = self.get_sd_vae_model(fpaa=lowercase ) A__ = self.get_sd_image(lowercase , fpaa=lowercase ) A__ = self.get_generator(lowercase ) with torch.no_grad(): A__ = model(lowercase , generator=lowercase , sample_posterior=lowercase ).sample assert sample.shape == image.shape A__ = sample[-1, -2:, :2, -2:].flatten().float().cpu() A__ = torch.tensor(lowercase ) assert torch_all_close(lowercase , lowercase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase ) with torch.no_grad(): A__ = model(lowercase ).sample assert sample.shape == image.shape A__ = sample[-1, -2:, -2:, :2].flatten().float().cpu() A__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(lowercase , lowercase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase ( self , lowercase , lowercase ) -> Tuple: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) ) with torch.no_grad(): A__ = model.decode(lowercase ).sample assert list(sample.shape ) == [3, 3, 512, 512] A__ = sample[-1, -2:, :2, -2:].flatten().cpu() A__ = torch.tensor(lowercase ) assert torch_all_close(lowercase , lowercase , atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = self.get_sd_vae_model(fpaa=lowercase ) A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) , fpaa=lowercase ) with torch.no_grad(): A__ = model.decode(lowercase ).sample assert list(sample.shape ) == [3, 3, 512, 512] A__ = sample[-1, -2:, :2, -2:].flatten().float().cpu() A__ = torch.tensor(lowercase ) assert torch_all_close(lowercase , lowercase , atol=5e-3 ) @parameterized.expand([(13,), 
(16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def UpperCamelCase ( self , lowercase ) -> Optional[Any]: '''simple docstring''' A__ = self.get_sd_vae_model(fpaa=lowercase ) A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) , fpaa=lowercase ) with torch.no_grad(): A__ = model.decode(lowercase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): A__ = model.decode(lowercase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowercase , lowercase , atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def UpperCamelCase ( self , lowercase ) -> List[str]: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) ) with torch.no_grad(): A__ = model.decode(lowercase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): A__ = model.decode(lowercase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowercase , lowercase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def UpperCamelCase ( self , lowercase , lowercase ) -> str: '''simple docstring''' A__ = self.get_sd_vae_model() A__ = self.get_sd_image(lowercase ) A__ = self.get_generator(lowercase ) with torch.no_grad(): A__ = model.encode(lowercase ).latent_dist A__ = dist.sample(generator=lowercase ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] A__ = sample[0, -1, -3:, -3:].flatten().cpu() A__ = torch.tensor(lowercase ) A__ = 3e-3 if torch_device != "mps" else 1e-2 assert torch_all_close(lowercase , lowercase , atol=lowercase )
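Outside the test harness, the model under test round-trips images through a 4-channel latent space with 8x spatial downsampling; a minimal sketch using the public diffusers API:

import torch

from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
vae.eval()

image = torch.randn(1, 3, 512, 512)
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample()  # (1, 4, 64, 64)
    reconstruction = vae.decode(latents).sample       # (1, 3, 512, 512)
print(latents.shape, reconstruction.shape)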
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class _UpperCamelCase : '''simple docstring''' def __init__( self ): __lowerCAmelCase = {} def snake_case ( self , __a , __a , __a=1 ): if self.graph.get(__a ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: __lowerCAmelCase = [[w, v]] if not self.graph.get(__a ): __lowerCAmelCase = [] def snake_case ( self ): return list(self.graph ) def snake_case ( self , __a , __a ): if self.graph.get(__a ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__a ) def snake_case ( self , __a=-2 , __a=-1 ): if s == d: return [] __lowerCAmelCase = [] __lowerCAmelCase = [] if s == -2: __lowerCAmelCase = list(self.graph )[0] stack.append(__a ) visited.append(__a ) __lowerCAmelCase = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __lowerCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__a ) return visited else: stack.append(node[1] ) visited.append(node[1] ) __lowerCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__a ) != 0: __lowerCAmelCase = stack[len(__a ) - 1] else: __lowerCAmelCase = ss # check if se have reached the starting point if len(__a ) == 0: return visited def snake_case ( self , __a=-1 ): if c == -1: __lowerCAmelCase = floor(random() * 1_00_00 ) + 10 for i in range(__a ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): __lowerCAmelCase = floor(random() * c ) + 1 if n != i: self.add_pair(__a , __a , 1 ) def snake_case ( self , __a=-2 ): __lowerCAmelCase = deque() __lowerCAmelCase = [] if s == -2: __lowerCAmelCase = list(self.graph )[0] d.append(__a ) visited.append(__a ) while d: __lowerCAmelCase = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def snake_case ( self , __a ): __lowerCAmelCase = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def snake_case ( self , __a ): return len(self.graph[u] ) def snake_case ( self , __a=-2 ): __lowerCAmelCase = [] __lowerCAmelCase = [] if s == -2: __lowerCAmelCase = list(self.graph )[0] stack.append(__a ) visited.append(__a ) __lowerCAmelCase = s __lowerCAmelCase = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __lowerCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __lowerCAmelCase = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(__a ) != 0: __lowerCAmelCase = stack[len(__a ) - 1] else: __lowerCAmelCase = ss # check if se have reached the starting point if len(__a ) == 0: return sorted_nodes def snake_case ( self ): __lowerCAmelCase = [] __lowerCAmelCase = [] __lowerCAmelCase = list(self.graph )[0] stack.append(__a ) visited.append(__a ) __lowerCAmelCase = -2 __lowerCAmelCase = [] __lowerCAmelCase = s __lowerCAmelCase = False __lowerCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __lowerCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __lowerCAmelCase = len(__a ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: 
anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __lowerCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() __lowerCAmelCase = True if len(__a ) != 0: __lowerCAmelCase = stack[len(__a ) - 1] else: __lowerCAmelCase = False indirect_parents.append(__a ) __lowerCAmelCase = s __lowerCAmelCase = ss # check if se have reached the starting point if len(__a ) == 0: return list(__a ) def snake_case ( self ): __lowerCAmelCase = [] __lowerCAmelCase = [] __lowerCAmelCase = list(self.graph )[0] stack.append(__a ) visited.append(__a ) __lowerCAmelCase = -2 __lowerCAmelCase = [] __lowerCAmelCase = s __lowerCAmelCase = False __lowerCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __lowerCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __lowerCAmelCase = len(__a ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __lowerCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() __lowerCAmelCase = True if len(__a ) != 0: __lowerCAmelCase = stack[len(__a ) - 1] else: __lowerCAmelCase = False indirect_parents.append(__a ) __lowerCAmelCase = s __lowerCAmelCase = ss # check if se have reached the starting point if len(__a ) == 0: return False def snake_case ( self , __a=-2 , __a=-1 ): __lowerCAmelCase = time() self.dfs(__a , __a ) __lowerCAmelCase = time() return end - begin def snake_case ( self , __a=-2 ): __lowerCAmelCase = time() self.bfs(__a ) __lowerCAmelCase = time() return end - begin class _UpperCamelCase : '''simple docstring''' def __init__( self ): __lowerCAmelCase = {} def snake_case ( self , __a , __a , __a=1 ): if self.graph.get(__a ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist __lowerCAmelCase = [[w, v]] # add the other way if self.graph.get(__a ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist __lowerCAmelCase = [[w, u]] def snake_case ( self , __a , __a ): if self.graph.get(__a ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__a ) # the other way round if self.graph.get(__a ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(__a ) def snake_case ( self , __a=-2 , __a=-1 ): if s == d: return [] __lowerCAmelCase = [] __lowerCAmelCase = [] if s == -2: __lowerCAmelCase = list(self.graph )[0] stack.append(__a ) visited.append(__a ) __lowerCAmelCase = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __lowerCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__a ) return visited else: stack.append(node[1] ) visited.append(node[1] ) __lowerCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__a ) != 0: __lowerCAmelCase = stack[len(__a ) - 1] else: __lowerCAmelCase = ss # check if se have reached the starting point if len(__a ) == 0: return visited def snake_case ( self , __a=-1 ): if c == -1: __lowerCAmelCase = floor(random() * 1_00_00 ) + 10 for i in range(__a ): 
# every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): __lowerCAmelCase = floor(random() * c ) + 1 if n != i: self.add_pair(__a , __a , 1 ) def snake_case ( self , __a=-2 ): __lowerCAmelCase = deque() __lowerCAmelCase = [] if s == -2: __lowerCAmelCase = list(self.graph )[0] d.append(__a ) visited.append(__a ) while d: __lowerCAmelCase = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def snake_case ( self , __a ): return len(self.graph[u] ) def snake_case ( self ): __lowerCAmelCase = [] __lowerCAmelCase = [] __lowerCAmelCase = list(self.graph )[0] stack.append(__a ) visited.append(__a ) __lowerCAmelCase = -2 __lowerCAmelCase = [] __lowerCAmelCase = s __lowerCAmelCase = False __lowerCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __lowerCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __lowerCAmelCase = len(__a ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __lowerCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() __lowerCAmelCase = True if len(__a ) != 0: __lowerCAmelCase = stack[len(__a ) - 1] else: __lowerCAmelCase = False indirect_parents.append(__a ) __lowerCAmelCase = s __lowerCAmelCase = ss # check if se have reached the starting point if len(__a ) == 0: return list(__a ) def snake_case ( self ): __lowerCAmelCase = [] __lowerCAmelCase = [] __lowerCAmelCase = list(self.graph )[0] stack.append(__a ) visited.append(__a ) __lowerCAmelCase = -2 __lowerCAmelCase = [] __lowerCAmelCase = s __lowerCAmelCase = False __lowerCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __lowerCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __lowerCAmelCase = len(__a ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __lowerCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() __lowerCAmelCase = True if len(__a ) != 0: __lowerCAmelCase = stack[len(__a ) - 1] else: __lowerCAmelCase = False indirect_parents.append(__a ) __lowerCAmelCase = s __lowerCAmelCase = ss # check if se have reached the starting point if len(__a ) == 0: return False def snake_case ( self ): return list(self.graph ) def snake_case ( self , __a=-2 , __a=-1 ): __lowerCAmelCase = time() self.dfs(__a , __a ) __lowerCAmelCase = time() return end - begin def snake_case ( self , __a=-2 ): __lowerCAmelCase = time() self.bfs(__a ) __lowerCAmelCase = time() return end - begin
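The class names above are mangled, but the traversals are standard iterative DFS/BFS over an adjacency list of [weight, neighbor] pairs. A de-obfuscated sketch of the same idea (the Graph name is hypothetical):

from collections import deque


class Graph:
    def __init__(self):
        self.graph = {}  # vertex -> list of [weight, neighbor] pairs

    def add_pair(self, u, v, w=1):
        self.graph.setdefault(u, []).append([w, v])
        self.graph.setdefault(v, [])

    def dfs(self, start):
        # Iterative depth-first search; `reversed` keeps left-to-right edge order.
        stack, visited = [start], []
        while stack:
            node = stack.pop()
            if node not in visited:
                visited.append(node)
                stack.extend(nbr for _, nbr in reversed(self.graph[node]))
        return visited

    def bfs(self, start):
        # Iterative breadth-first search with a FIFO queue.
        queue, visited = deque([start]), [start]
        while queue:
            node = queue.popleft()
            for _, nbr in self.graph[node]:
                if nbr not in visited:
                    visited.append(nbr)
                    queue.append(nbr)
        return visited


g = Graph()
for u, v in [(0, 1), (0, 2), (1, 3)]:
    g.add_pair(u, v)
print(g.dfs(0), g.bfs(0))  # [0, 1, 3, 2] [0, 1, 2, 3]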
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask lowerCAmelCase__ = logging.getLogger(__name__) class a__ ( snake_case ): """simple docstring""" def __init__( self , lowercase=-1 ) -> Optional[Any]: '''simple docstring''' A__ = label_idx def UpperCamelCase ( self , lowercase , lowercase ) -> List[InputExample]: '''simple docstring''' if isinstance(lowercase , lowercase ): A__ = mode.value A__ = os.path.join(lowercase , F'{mode}.txt' ) A__ = 1 A__ = [] with open(lowercase , encoding="utf-8" ) as f: A__ = [] A__ = [] for line in f: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) ) guid_index += 1 A__ = [] A__ = [] else: A__ = line.split(" " ) words.append(splits[0] ) if len(lowercase ) > 1: labels.append(splits[self.label_idx].replace("\n" , "" ) ) else: # Examples could have no label for mode = "test" labels.append("O" ) if words: examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) ) return examples def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Optional[Any]: '''simple docstring''' A__ = 0 for line in test_input_reader: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": writer.write(lowercase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: A__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n" writer.write(lowercase ) else: logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] ) def UpperCamelCase ( self , lowercase ) -> List[str]: '''simple docstring''' if path: with open(lowercase , "r" ) as f: A__ = f.read().splitlines() if "O" not in labels: A__ = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class a__ ( snake_case ): """simple docstring""" def __init__( self ) -> Union[str, Any]: '''simple docstring''' super().__init__(label_idx=-2 ) def UpperCamelCase ( self , lowercase ) -> List[str]: '''simple docstring''' if path: with open(lowercase , "r" ) as f: A__ = f.read().splitlines() if "O" not in labels: A__ = ["O"] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class a__ ( snake_case ): """simple docstring""" def UpperCamelCase ( self , lowercase , lowercase ) -> List[InputExample]: '''simple docstring''' if isinstance(lowercase , lowercase ): A__ = mode.value A__ = os.path.join(lowercase , F'{mode}.txt' ) A__ = 1 A__ = [] with open(lowercase , encoding="utf-8" ) as f: for sentence in parse_incr(lowercase ): A__ = [] A__ = [] for token in sentence: words.append(token["form"] ) labels.append(token["upos"] ) assert len(lowercase ) == len(lowercase ) if words: examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) ) guid_index += 1 return examples def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' A__ = 0 for sentence in parse_incr(lowercase ): A__ = preds_list[example_id] A__ = "" for token in sentence: out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) ' out += "\n" writer.write(lowercase ) example_id += 1 def UpperCamelCase ( self , lowercase ) -> 
List[str]: '''simple docstring''' if path: with open(lowercase , "r" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
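For context, a toy illustration of the whitespace-separated, blank-line-delimited format the NER reader above parses (the file contents are made up):

conll = """John B-PER
lives O
in O
Paris B-LOC

Bye O
"""
# One sentence per blank-line-separated block; token and tag split on a single space.
sentences = [
    [line.split(" ") for line in block.splitlines()]
    for block in conll.strip().split("\n\n")
]
print(sentences[0])  # [['John', 'B-PER'], ['lives', 'O'], ['in', 'O'], ['Paris', 'B-LOC']]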
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    '''simple docstring'''

    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
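A usage sketch, assuming a datasets release that still supports task templates (prepare_for_task was deprecated in later versions); the dataset name is only illustrative:

from datasets import load_dataset

ds = load_dataset("PolyAI/minds14", "en-US", split="train")
# Casts and renames columns so the schema matches the template above.
ds = ds.prepare_for_task("automatic-speech-recognition")
print(ds.features)  # {'audio': Audio(...), 'transcription': Value('string')}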
import random


class Onepad:
    @staticmethod
    def encrypt(text) -> tuple[list[int], list[int]]:
        """Encrypt each character code with its own random key: c = (p + k) * k."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher, key) -> str:
        """Invert encrypt: p = (c - k**2) / k."""
        plain = []
        for i in range(len(cipher)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
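Why decryption inverts encryption: each character is stored as c = (p + k) * k = p*k + k**2, so p = (c - k**2) / k, which is exactly what decrypt computes. A quick check:

p, k = ord("H"), 57  # arbitrary plaintext byte and key
c = (p + k) * k
assert int((c - k**2) / k) == p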
"""simple docstring""" import sys from collections import defaultdict class UpperCamelCase : def __init__( self) -> Optional[int]: snake_case_ = [] def a_ ( self, lowerCAmelCase__) -> Any: return self.node_position[vertex] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Dict: snake_case_ = pos def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> str: if start > size // 2 - 1: return else: if 2 * start + 2 >= size: snake_case_ = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: snake_case_ = 2 * start + 1 else: snake_case_ = 2 * start + 2 if heap[smallest_child] < heap[start]: snake_case_ , snake_case_ = heap[smallest_child], positions[smallest_child] snake_case_ , snake_case_ = ( heap[start], positions[start], ) snake_case_ , snake_case_ = temp, tempa snake_case_ = self.get_position(positions[smallest_child]) self.set_position( positions[smallest_child], self.get_position(positions[start])) self.set_position(positions[start], lowerCAmelCase__) self.top_to_bottom(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> List[str]: snake_case_ = position[index] while index != 0: snake_case_ = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: snake_case_ = heap[parent] snake_case_ = position[parent] self.set_position(position[parent], lowerCAmelCase__) else: snake_case_ = val snake_case_ = temp self.set_position(lowerCAmelCase__, lowerCAmelCase__) break snake_case_ = parent else: snake_case_ = val snake_case_ = temp self.set_position(lowerCAmelCase__, 0) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Dict: snake_case_ = len(lowerCAmelCase__) // 2 - 1 for i in range(lowerCAmelCase__, -1, -1): self.top_to_bottom(lowerCAmelCase__, lowerCAmelCase__, len(lowerCAmelCase__), lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Union[str, Any]: snake_case_ = positions[0] snake_case_ = sys.maxsize self.top_to_bottom(lowerCAmelCase__, 0, len(lowerCAmelCase__), lowerCAmelCase__) return temp def UpperCAmelCase ( UpperCAmelCase ) -> Tuple: snake_case_ = Heap() snake_case_ = [0] * len(UpperCAmelCase ) snake_case_ = [-1] * len(UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph snake_case_ = [] # Heap of Distance of vertices from their neighboring vertex snake_case_ = [] for vertex in range(len(UpperCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(UpperCAmelCase ) heap.node_position.append(UpperCAmelCase ) snake_case_ = [] snake_case_ = 1 snake_case_ = sys.maxsize for neighbor, distance in adjacency_list[0]: snake_case_ = 0 snake_case_ = distance heap.heapify(UpperCAmelCase , UpperCAmelCase ) for _ in range(1 , len(UpperCAmelCase ) ): snake_case_ = heap.delete_minimum(UpperCAmelCase , UpperCAmelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) snake_case_ = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(UpperCAmelCase )] ): snake_case_ = distance heap.bottom_to_top( UpperCAmelCase , heap.get_position(UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase ) snake_case_ = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > __UpperCamelCase = int(input('''Enter number of edges: ''').strip()) __UpperCamelCase 
= defaultdict(list) for _ in range(edges_number): __UpperCamelCase = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
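A self-contained run that bypasses stdin (prisms_algorithm is the entry-point name used in the __main__ block above); the graph is a small weighted example whose minimum spanning tree uses the three cheapest edges:

from collections import defaultdict

adjacency_list = defaultdict(list)
for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 7), (2, 3, 3)]:
    adjacency_list[u].append([v, w])
    adjacency_list[v].append([u, w])
print(prisms_algorithm(adjacency_list))  # MST edges, e.g. [(0, 1), (1, 2), (2, 3)]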
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = (DPMSolverSinglestepScheduler,) SCREAMING_SNAKE_CASE_ = (("num_inference_steps", 2_5),) def a_ ( self, **lowerCAmelCase__) -> int: snake_case_ = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, 'prediction_type': 'epsilon', 'thresholding': False, 'sample_max_value': 1.0, 'algorithm_type': 'dpmsolver++', 'solver_type': 'midpoint', 'lambda_min_clipped': -float('inf'), 'variance_type': None, } config.update(**lowerCAmelCase__) return config def a_ ( self, lowerCAmelCase__=0, **lowerCAmelCase__) -> List[Any]: snake_case_ = dict(self.forward_default_kwargs) snake_case_ = kwargs.pop('num_inference_steps', lowerCAmelCase__) snake_case_ = self.dummy_sample snake_case_ = 0.1 * sample snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: snake_case_ = self.get_scheduler_config(**lowerCAmelCase__) snake_case_ = scheduler_class(**lowerCAmelCase__) scheduler.set_timesteps(lowerCAmelCase__) # copy over dummy past residuals snake_case_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCAmelCase__) snake_case_ = scheduler_class.from_pretrained(lowerCAmelCase__) new_scheduler.set_timesteps(lowerCAmelCase__) # copy over dummy past residuals snake_case_ = dummy_past_residuals[: new_scheduler.config.solver_order] snake_case_ , snake_case_ = sample, sample for t in range(lowerCAmelCase__, time_step + scheduler.config.solver_order + 1): snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample snake_case_ = new_scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def a_ ( self) -> Union[str, Any]: pass def a_ ( self, lowerCAmelCase__=0, **lowerCAmelCase__) -> int: snake_case_ = dict(self.forward_default_kwargs) snake_case_ = kwargs.pop('num_inference_steps', lowerCAmelCase__) snake_case_ = self.dummy_sample snake_case_ = 0.1 * sample snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: snake_case_ = self.get_scheduler_config() snake_case_ = scheduler_class(**lowerCAmelCase__) scheduler.set_timesteps(lowerCAmelCase__) # copy over dummy past residuals (must be after setting timesteps) snake_case_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCAmelCase__) snake_case_ = scheduler_class.from_pretrained(lowerCAmelCase__) # copy over dummy past residuals new_scheduler.set_timesteps(lowerCAmelCase__) # copy over dummy past residual (must be after setting timesteps) snake_case_ = dummy_past_residuals[: new_scheduler.config.solver_order] snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample snake_case_ = new_scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def a_ ( self, 
lowerCAmelCase__=None, **lowerCAmelCase__) -> Union[str, Any]: if scheduler is None: snake_case_ = self.scheduler_classes[0] snake_case_ = self.get_scheduler_config(**lowerCAmelCase__) snake_case_ = scheduler_class(**lowerCAmelCase__) snake_case_ = self.scheduler_classes[0] snake_case_ = self.get_scheduler_config(**lowerCAmelCase__) snake_case_ = scheduler_class(**lowerCAmelCase__) snake_case_ = 10 snake_case_ = self.dummy_model() snake_case_ = self.dummy_sample_deter scheduler.set_timesteps(lowerCAmelCase__) for i, t in enumerate(scheduler.timesteps): snake_case_ = model(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample return sample def a_ ( self) -> List[Any]: snake_case_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) snake_case_ = 50 snake_case_ = self.dummy_model() snake_case_ = self.dummy_sample_deter scheduler.set_timesteps(lowerCAmelCase__) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:]): snake_case_ = model(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.2574) < 1e-3 def a_ ( self) -> Dict: for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=lowerCAmelCase__) def a_ ( self) -> Optional[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults snake_case_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) snake_case_ = self.full_loop(scheduler=lowerCAmelCase__) snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.2791) < 1e-3 snake_case_ = DEISMultistepScheduler.from_config(scheduler.config) snake_case_ = DPMSolverMultistepScheduler.from_config(scheduler.config) snake_case_ = UniPCMultistepScheduler.from_config(scheduler.config) snake_case_ = DPMSolverSinglestepScheduler.from_config(scheduler.config) snake_case_ = self.full_loop(scheduler=lowerCAmelCase__) snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.2791) < 1e-3 def a_ ( self) -> str: self.check_over_configs(thresholding=lowerCAmelCase__) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowerCAmelCase__, prediction_type=lowerCAmelCase__, sample_max_value=lowerCAmelCase__, algorithm_type='dpmsolver++', solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, ) def a_ ( self) -> Tuple: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase__) def a_ ( self) -> Optional[int]: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, prediction_type=lowerCAmelCase__, algorithm_type=lowerCAmelCase__, ) snake_case_ = self.full_loop( solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, prediction_type=lowerCAmelCase__, algorithm_type=lowerCAmelCase__, ) assert not torch.isnan(lowerCAmelCase__).any(), "Samples have nan numbers" def a_ ( self) -> Optional[Any]: self.check_over_configs(lower_order_final=lowerCAmelCase__) self.check_over_configs(lower_order_final=lowerCAmelCase__) def a_ ( 
self) -> Any: self.check_over_configs(lambda_min_clipped=-float('inf')) self.check_over_configs(lambda_min_clipped=-5.1) def a_ ( self) -> Any: self.check_over_configs(variance_type=lowerCAmelCase__) self.check_over_configs(variance_type='learned_range') def a_ ( self) -> List[Any]: for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=lowerCAmelCase__, time_step=0) def a_ ( self) -> int: snake_case_ = self.full_loop() snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.2791) < 1e-3 def a_ ( self) -> Dict: snake_case_ = self.full_loop(use_karras_sigmas=lowerCAmelCase__) snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.2248) < 1e-3 def a_ ( self) -> Union[str, Any]: snake_case_ = self.full_loop(prediction_type='v_prediction') snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.1453) < 1e-3 def a_ ( self) -> Optional[Any]: snake_case_ = self.full_loop(prediction_type='v_prediction', use_karras_sigmas=lowerCAmelCase__) snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.0649) < 1e-3 def a_ ( self) -> Optional[int]: snake_case_ = self.scheduler_classes[0] snake_case_ = self.get_scheduler_config(thresholding=lowerCAmelCase__, dynamic_thresholding_ratio=0) snake_case_ = scheduler_class(**lowerCAmelCase__) snake_case_ = 10 snake_case_ = self.dummy_model() snake_case_ = self.dummy_sample_deter.half() scheduler.set_timesteps(lowerCAmelCase__) for i, t in enumerate(scheduler.timesteps): snake_case_ = model(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample assert sample.dtype == torch.floataa
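For orientation, the denoising loop these tests exercise looks like this in user code; the zero tensor stands in for a real noise-prediction network:

import torch

from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(25)

sample = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample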
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "facebook/bart-large-mnli" SCREAMING_SNAKE_CASE_ = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) SCREAMING_SNAKE_CASE_ = "text_classifier" SCREAMING_SNAKE_CASE_ = AutoTokenizer SCREAMING_SNAKE_CASE_ = AutoModelForSequenceClassification SCREAMING_SNAKE_CASE_ = ["text", ["text"]] SCREAMING_SNAKE_CASE_ = ["text"] def a_ ( self) -> Dict: super().setup() snake_case_ = self.model.config snake_case_ = -1 for idx, label in config.idalabel.items(): if label.lower().startswith('entail'): snake_case_ = int(lowerCAmelCase__) if self.entailment_id == -1: raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.') def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> List[Any]: snake_case_ = labels return self.pre_processor( [text] * len(lowerCAmelCase__), [f'This example is {label}' for label in labels], return_tensors='pt', padding='max_length', ) def a_ ( self, lowerCAmelCase__) -> Tuple: snake_case_ = outputs.logits snake_case_ = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool: # 1. Validate that path exists between current and next vertices if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool: # Base Case if curr_ind == len(UpperCAmelCase ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(UpperCAmelCase ) ): if valid_connection(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): # Insert current vertex into path as next transition snake_case_ = next_ver # Validate created path if util_hamilton_cycle(UpperCAmelCase , UpperCAmelCase , curr_ind + 1 ): return True # Backtrack snake_case_ = -1 return False def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 0 ) -> list[int]: snake_case_ = [-1] * (len(UpperCAmelCase ) + 1) # initialize start and end of path with starting index snake_case_ = snake_case_ = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(UpperCAmelCase , UpperCAmelCase , 1 ) else []
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def UpperCAmelCase ( ) -> List[str]: with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(UpperCAmelCase ): requests.request('GET' , 'https://huggingface.co' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('GET' , 'https://huggingface.co' , timeout=1.0 ) @pytest.mark.integration def UpperCAmelCase ( ) -> Dict: with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('GET' , 'https://huggingface.co' ) def UpperCAmelCase ( ) -> List[Any]: with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(UpperCAmelCase ): http_head('https://huggingface.co' )
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = '''▁''' __UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''} __UpperCamelCase = { '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model''' ), } } __UpperCamelCase = { '''facebook/nllb-200-distilled-600M''': 1024, } # fmt: off __UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', 
'''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] def __init__( self, lowerCAmelCase__, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__ = None, lowerCAmelCase__=None, lowerCAmelCase__=False, **lowerCAmelCase__, ) -> Union[str, Any]: # Mask token behave like a normal word, i.e. include the space before it snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs snake_case_ = legacy_behaviour super().__init__( bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, src_lang=lowerCAmelCase__, tgt_lang=lowerCAmelCase__, additional_special_tokens=lowerCAmelCase__, sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(lowerCAmelCase__)) snake_case_ = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token snake_case_ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab snake_case_ = 1 snake_case_ = len(self.sp_model) snake_case_ = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase__) } snake_case_ = {v: k for k, v in self.lang_code_to_id.items()} snake_case_ = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id) snake_case_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} snake_case_ = list(self.lang_code_to_id.keys()) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens]) snake_case_ = src_lang if src_lang is not None else 'eng_Latn' snake_case_ = self.lang_code_to_id[self._src_lang] snake_case_ = tgt_lang self.set_src_lang_special_tokens(self._src_lang) def __getstate__( self) -> Union[str, Any]: snake_case_ = self.__dict__.copy() snake_case_ = None snake_case_ = self.sp_model.serialized_model_proto() return state def __setstate__( self, lowerCAmelCase__) -> Tuple: snake_case_ = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs'): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) @property def a_ ( self) -> str: return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def a_ ( self) -> str: return self._src_lang @src_lang.setter def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase__, token_ids_a=lowerCAmelCase__, already_has_special_tokens=lowerCAmelCase__) snake_case_ = [1] * len(self.prefix_tokens) snake_case_ = [1] * len(self.suffix_tokens) if token_ids_a is None: return prefix_ones + ([0] * len(lowerCAmelCase__)) + suffix_ones return prefix_ones + ([0] * len(lowerCAmelCase__)) + ([0] * len(lowerCAmelCase__)) + suffix_ones def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> str: if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model') snake_case_ = src_lang snake_case_ = self(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__) snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__) snake_case_ = tgt_lang_id return inputs def a_ ( self) -> List[Any]: snake_case_ = {self.convert_ids_to_tokens(lowerCAmelCase__): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def a_ ( self, lowerCAmelCase__) -> List[str]: return self.sp_model.encode(lowerCAmelCase__, out_type=lowerCAmelCase__) def a_ ( self, lowerCAmelCase__) -> Any: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] snake_case_ = self.sp_model.PieceToId(lowerCAmelCase__) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def a_ ( self, lowerCAmelCase__) -> Dict: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def a_ ( self, lowerCAmelCase__) -> List[str]: snake_case_ 
= ''.join(lowerCAmelCase__).replace(lowerCAmelCase__, ' ').strip() return out_string def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase__): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return snake_case_ = os.path.join( lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, lowerCAmelCase__) elif not os.path.isfile(self.vocab_file): with open(lowerCAmelCase__, 'wb') as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase__) return (out_vocab_file,) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = "eng_Latn", lowerCAmelCase__ = None, lowerCAmelCase__ = "fra_Latn", **lowerCAmelCase__, ) -> BatchEncoding: snake_case_ = src_lang snake_case_ = tgt_lang return super().prepare_seqaseq_batch(lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self) -> Union[str, Any]: return self.set_src_lang_special_tokens(self.src_lang) def a_ ( self) -> int: return self.set_tgt_lang_special_tokens(self.tgt_lang) def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = self.lang_code_to_id[src_lang] if self.legacy_behaviour: snake_case_ = [] snake_case_ = [self.eos_token_id, self.cur_lang_code] else: snake_case_ = [self.cur_lang_code] snake_case_ = [self.eos_token_id] def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = self.lang_code_to_id[lang] if self.legacy_behaviour: snake_case_ = [] snake_case_ = [self.eos_token_id, self.cur_lang_code] else: snake_case_ = [self.cur_lang_code] snake_case_ = [self.eos_token_id]
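A translation-style encoding sketch, assuming the class above is the upstream NllbTokenizer; with the default (non-legacy) behaviour the source language code is prefixed and </s> appended:

from transformers import NllbTokenizer

tokenizer = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
inputs = tokenizer("Hello world", return_tensors="pt")
print(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0]))
# e.g. ['eng_Latn', '▁Hello', '▁world', '</s>']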
"""simple docstring""" from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput __UpperCamelCase = 8 def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=BITS ) -> Any: snake_case_ = x.device snake_case_ = (x * 255).int().clamp(0 , 255 ) snake_case_ = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCAmelCase ) snake_case_ = rearrange(UpperCAmelCase , 'd -> d 1 1' ) snake_case_ = rearrange(UpperCAmelCase , 'b c h w -> b c 1 h w' ) snake_case_ = ((x & mask) != 0).float() snake_case_ = rearrange(UpperCAmelCase , 'b c d h w -> b (c d) h w' ) snake_case_ = bits * 2 - 1 return bits def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=BITS ) -> Any: snake_case_ = x.device snake_case_ = (x > 0).int() snake_case_ = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCAmelCase , dtype=torch.intaa ) snake_case_ = rearrange(UpperCAmelCase , 'd -> d 1 1' ) snake_case_ = rearrange(UpperCAmelCase , 'b (c d) h w -> b c d h w' , d=8 ) snake_case_ = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' ) return (dec / 255).clamp(0.0 , 1.0 ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 0.0 , UpperCAmelCase = True , UpperCAmelCase=None , UpperCAmelCase = True , ) -> Union[DDIMSchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) snake_case_ = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas snake_case_ = self.alphas_cumprod[timestep] snake_case_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod snake_case_ = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf snake_case_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" snake_case_ = self.bit_scale if self.config.clip_sample: snake_case_ = torch.clamp(UpperCAmelCase , -scale , UpperCAmelCase ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) snake_case_ = self._get_variance(UpperCAmelCase , UpperCAmelCase ) snake_case_ = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide snake_case_ = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf snake_case_ = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf snake_case_ = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 snake_case_ = model_output.device if torch.is_tensor(UpperCAmelCase ) else 'cpu' snake_case_ = torch.randn(model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase ).to(UpperCAmelCase ) snake_case_ = self._get_variance(UpperCAmelCase , UpperCAmelCase ) ** 0.5 * eta * noise snake_case_ = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="epsilon" , UpperCAmelCase=None , UpperCAmelCase = True , ) -> Union[DDPMSchedulerOutput, Tuple]: snake_case_ = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: snake_case_ , snake_case_ = torch.split(UpperCAmelCase , sample.shape[1] , dim=1 ) else: snake_case_ = None # 1. compute alphas, betas snake_case_ = self.alphas_cumprod[t] snake_case_ = self.alphas_cumprod[t - 1] if t > 0 else self.one snake_case_ = 1 - alpha_prod_t snake_case_ = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": snake_case_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": snake_case_ = model_output else: raise ValueError(f'Unsupported prediction_type {prediction_type}.' ) # 3. Clip "predicted x_0" snake_case_ = self.bit_scale if self.config.clip_sample: snake_case_ = torch.clamp(UpperCAmelCase , -scale , UpperCAmelCase ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf snake_case_ = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t snake_case_ = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf snake_case_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise snake_case_ = 0 if t > 0: snake_case_ = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=UpperCAmelCase ).to(model_output.device ) snake_case_ = (self._get_variance(UpperCAmelCase , predicted_variance=UpperCAmelCase ) ** 0.5) * noise snake_case_ = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase ) class UpperCamelCase ( lowerCAmelCase__ ): def __init__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = 1.0, ) -> Optional[int]: super().__init__() snake_case_ = bit_scale snake_case_ = ( ddim_bit_scheduler_step if isinstance(lowerCAmelCase__, lowerCAmelCase__) else ddpm_bit_scheduler_step ) self.register_modules(unet=lowerCAmelCase__, scheduler=lowerCAmelCase__) @torch.no_grad() def __call__( self, lowerCAmelCase__ = 256, lowerCAmelCase__ = 256, lowerCAmelCase__ = 50, lowerCAmelCase__ = None, lowerCAmelCase__ = 1, lowerCAmelCase__ = "pil", lowerCAmelCase__ = True, **lowerCAmelCase__, ) -> Union[Tuple, ImagePipelineOutput]: snake_case_ = torch.randn( (batch_size, self.unet.config.in_channels, height, width), generator=lowerCAmelCase__, ) snake_case_ = decimal_to_bits(lowerCAmelCase__) * self.bit_scale snake_case_ = latents.to(self.device) self.scheduler.set_timesteps(lowerCAmelCase__) for t in self.progress_bar(self.scheduler.timesteps): # predict the noise residual snake_case_ = self.unet(lowerCAmelCase__, lowerCAmelCase__).sample # compute the previous noisy sample x_t -> x_t-1 snake_case_ = self.scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample snake_case_ = bits_to_decimal(lowerCAmelCase__) if output_type == "pil": snake_case_ = self.numpy_to_pil(lowerCAmelCase__) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase__)
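# Illustrative sketch (our addition, not part of the original file): a round-trip
# check for the bit encoding used by the pipeline above. The helpers below
# re-implement the decimal<->bit conversion under the assumption BITS = 8; the
# names `to_bits`/`from_bits` are ours, chosen to avoid the mangled names above.
import torch
from einops import rearrange, reduce


def to_bits(x, bits=8):
    # map [0, 1] floats to {-1, +1} bit planes, one plane per bit
    x = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=x.device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")
    planes = ((x & mask) != 0).float()
    return rearrange(planes, "b c d h w -> b (c d) h w") * 2 - 1


def from_bits(x, bits=8):
    # invert: threshold at 0, re-weight the bit planes, renormalise to [0, 1]
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=x.device, dtype=torch.int64)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=bits)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)


image = torch.rand(1, 3, 8, 8)
quantised = (image * 255).int() / 255  # 8-bit quantisation is the only loss
assert torch.allclose(from_bits(to_bits(image)), quantised)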
"""simple docstring""" from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def UpperCAmelCase ( ) -> int: snake_case_ = HfArgumentParser(UpperCAmelCase ) snake_case_ = parser.parse_args_into_dataclasses()[0] snake_case_ = TensorFlowBenchmark(args=UpperCAmelCase ) try: snake_case_ = parser.parse_args_into_dataclasses()[0] except ValueError as e: snake_case_ = 'Arg --no_{0} is no longer used, please use --no-{0} instead.' snake_case_ = ' '.join(str(UpperCAmelCase ).split(' ' )[:-1] ) snake_case_ = '' snake_case_ = eval(str(UpperCAmelCase ).split(' ' )[-1] ) snake_case_ = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(UpperCAmelCase ) if len(UpperCAmelCase ) > 0: snake_case_ = full_error_msg + begin_error_msg + str(UpperCAmelCase ) raise ValueError(UpperCAmelCase ) benchmark.run() if __name__ == "__main__": main()
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel __UpperCamelCase = logging.getLogger(__name__) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: # save results if os.path.exists(UpperCAmelCase ): if os.path.exists(os.path.join(UpperCAmelCase , 'config.json' ) ) and os.path.isfile( os.path.join(UpperCAmelCase , 'config.json' ) ): os.remove(os.path.join(UpperCAmelCase , 'config.json' ) ) if os.path.exists(os.path.join(UpperCAmelCase , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(UpperCAmelCase , 'pytorch_model.bin' ) ): os.remove(os.path.join(UpperCAmelCase , 'pytorch_model.bin' ) ) else: os.makedirs(UpperCAmelCase ) model.save_pretrained(UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=False ) -> Union[str, Any]: snake_case_ = 2 if unlogit: snake_case_ = torch.pow(UpperCAmelCase , UpperCAmelCase ) snake_case_ = p * torch.log(UpperCAmelCase ) snake_case_ = 0 return -plogp.sum(dim=-1 ) def UpperCAmelCase ( UpperCAmelCase ) -> List[str]: logger.info('lv, h >\t' + '\t'.join(f'{x + 1}' for x in range(len(UpperCAmelCase ) ) ) ) for row in range(len(UpperCAmelCase ) ): if tensor.dtype != torch.long: logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:.5f}' for x in tensor[row].cpu().data ) ) else: logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:d}' for x in tensor[row].cpu().data ) ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=False ) -> Union[str, Any]: snake_case_ , snake_case_ = model.config.num_hidden_layers, model.config.num_attention_heads snake_case_ = torch.zeros(UpperCAmelCase , UpperCAmelCase ).to(args.device ) snake_case_ = torch.zeros(UpperCAmelCase , UpperCAmelCase ).to(args.device ) if head_mask is None: snake_case_ = torch.ones(UpperCAmelCase , UpperCAmelCase ).to(args.device ) head_mask.requires_grad_(requires_grad=UpperCAmelCase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: snake_case_ = None snake_case_ = 0.0 snake_case_ = 0.0 for step, inputs in enumerate(tqdm(UpperCAmelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ): snake_case_ = tuple(t.to(args.device ) for t in inputs ) ((snake_case_) , ) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) snake_case_ = model(UpperCAmelCase , labels=UpperCAmelCase , head_mask=UpperCAmelCase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) snake_case_ , snake_case_ , snake_case_ = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(UpperCAmelCase ): snake_case_ = entropy(attn.detach() , UpperCAmelCase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(UpperCAmelCase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: snake_case_ = 2 
snake_case_ = torch.pow(torch.pow(UpperCAmelCase , UpperCAmelCase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: snake_case_ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(UpperCAmelCase ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(UpperCAmelCase ) logger.info('Head ranked by importance scores' ) snake_case_ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) snake_case_ = torch.arange( head_importance.numel() , device=args.device ) snake_case_ = head_ranks.view_as(UpperCAmelCase ) print_ad_tensor(UpperCAmelCase ) return attn_entropy, head_importance, total_loss def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]: snake_case_ , snake_case_ , snake_case_ = compute_heads_importance(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , compute_entropy=UpperCAmelCase ) snake_case_ = 1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f' , UpperCAmelCase , original_score * args.masking_threshold ) snake_case_ = torch.ones_like(UpperCAmelCase ) snake_case_ = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) snake_case_ = original_score while current_score >= original_score * args.masking_threshold: snake_case_ = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads snake_case_ = float('Inf' ) snake_case_ = head_importance.view(-1 ).sort()[1] if len(UpperCAmelCase ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads snake_case_ = current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) ) snake_case_ = new_head_mask.view(-1 ) snake_case_ = 0.0 snake_case_ = new_head_mask.view_as(UpperCAmelCase ) snake_case_ = new_head_mask.clone().detach() print_ad_tensor(UpperCAmelCase ) # Compute metric and head importance again snake_case_ , snake_case_ , snake_case_ = compute_heads_importance( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , compute_entropy=UpperCAmelCase , head_mask=UpperCAmelCase ) snake_case_ = 1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , UpperCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('Final head mask' ) print_ad_tensor(UpperCAmelCase ) np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() ) return head_mask def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: snake_case_ = datetime.now() snake_case_ , snake_case_ , snake_case_ = compute_heads_importance( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , compute_entropy=UpperCAmelCase , compute_importance=UpperCAmelCase , head_mask=UpperCAmelCase ) snake_case_ = 1 / loss snake_case_ = datetime.now() - before_time snake_case_ = sum(p.numel() for p in model.parameters() ) snake_case_ = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCAmelCase ) ) } for k, v in heads_to_prune.items(): if isinstance(UpperCAmelCase , UpperCAmelCase ): snake_case_ = [ v, ] assert sum(len(UpperCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(UpperCAmelCase ) 
snake_case_ = sum(p.numel() for p in model.parameters() ) snake_case_ = datetime.now() snake_case_ , snake_case_ , snake_case_ = compute_heads_importance( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , compute_entropy=UpperCAmelCase , compute_importance=UpperCAmelCase , head_mask=UpperCAmelCase , actually_pruned=UpperCAmelCase , ) snake_case_ = 1 / loss snake_case_ = datetime.now() - before_time logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , UpperCAmelCase , UpperCAmelCase , pruned_num_params / original_num_params * 100 , ) logger.info('Pruning: score with masking: %f score with pruning: %f' , UpperCAmelCase , UpperCAmelCase ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 ) save_model(UpperCAmelCase , args.output_dir ) def UpperCAmelCase ( ) -> List[str]: snake_case_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir' , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , ) parser.add_argument( '--model_name_or_path' , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--output_dir' , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , ) # Other parameters parser.add_argument( '--config_name' , default='' , type=UpperCAmelCase , help='Pretrained config name or path if not the same as model_name_or_path' , ) parser.add_argument( '--tokenizer_name' , default='' , type=UpperCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , ) parser.add_argument( '--cache_dir' , default=UpperCAmelCase , type=UpperCAmelCase , help='Where do you want to store the pre-trained models downloaded from s3' , ) parser.add_argument( '--data_subset' , type=UpperCAmelCase , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' ) parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , ) parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' ) parser.add_argument( '--masking_threshold' , default=0.9 , type=UpperCAmelCase , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , ) parser.add_argument( '--masking_amount' , default=0.1 , type=UpperCAmelCase , help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name' , default='acc' , type=UpperCAmelCase , help='Metric to use for head masking.' ) parser.add_argument( '--max_seq_length' , default=128 , type=UpperCAmelCase , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' 
) , ) parser.add_argument('--batch_size' , default=1 , type=UpperCAmelCase , help='Batch size.' ) parser.add_argument('--seed' , type=UpperCAmelCase , default=42 ) parser.add_argument('--local_rank' , type=UpperCAmelCase , default=-1 , help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip' , type=UpperCAmelCase , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=UpperCAmelCase , default='' , help='Can be used for distant debugging.' ) snake_case_ = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCAmelCase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: snake_case_ = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) snake_case_ = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) snake_case_ = torch.device('cuda' , args.local_rank ) snake_case_ = 1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) snake_case_ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: snake_case_ = nn.parallel.DistributedDataParallel( UpperCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCAmelCase ) elif args.n_gpu > 1: snake_case_ = nn.DataParallel(UpperCAmelCase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=UpperCAmelCase ) torch.save(UpperCAmelCase , os.path.join(args.output_dir , 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s' , UpperCAmelCase ) # Prepare dataset snake_case_ = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) snake_case_ = (torch.from_numpy(UpperCAmelCase ),) snake_case_ = TensorDataset(*UpperCAmelCase ) snake_case_ = RandomSampler(UpperCAmelCase ) snake_case_ = DataLoader(UpperCAmelCase , sampler=UpperCAmelCase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: snake_case_ = mask_heads(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) prune_heads(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) if __name__ == "__main__": main()
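# Illustrative invocation sketch (our addition, not from the source): the three
# required flags below are the ones the parser above declares as required; the
# script name and paths are placeholders.
#
#   python run_prune_gpt2.py --data_dir data/train_ids.txt --model_name_or_path gpt2 \
#       --output_dir out/ --try_masking --masking_threshold 0.9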
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( UpperCAmelCase ) -> None: create_state_space_tree(UpperCAmelCase , [] , 0 , [0 for i in range(len(UpperCAmelCase ) )] ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> None: if index == len(UpperCAmelCase ): print(UpperCAmelCase ) return for i in range(len(UpperCAmelCase ) ): if not index_used[i]: current_sequence.append(sequence[i] ) snake_case_ = True create_state_space_tree(UpperCAmelCase , UpperCAmelCase , index + 1 , UpperCAmelCase ) current_sequence.pop() snake_case_ = False __UpperCamelCase = [3, 1, 2, 4] generate_all_permutations(sequence) __UpperCamelCase = ["A", "B", "C"] generate_all_permutations(sequence_a)
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''', # See all SEW models at https://huggingface.co/models?filter=sew } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "sew" def __init__( self, lowerCAmelCase__=32, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=3072, lowerCAmelCase__=2, lowerCAmelCase__="gelu", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-5, lowerCAmelCase__="group", lowerCAmelCase__="gelu", lowerCAmelCase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), lowerCAmelCase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), lowerCAmelCase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), lowerCAmelCase__=False, lowerCAmelCase__=128, lowerCAmelCase__=16, lowerCAmelCase__=True, lowerCAmelCase__=0.05, lowerCAmelCase__=10, lowerCAmelCase__=2, lowerCAmelCase__=0.0, lowerCAmelCase__=10, lowerCAmelCase__=0, lowerCAmelCase__="mean", lowerCAmelCase__=False, lowerCAmelCase__=False, lowerCAmelCase__=256, lowerCAmelCase__=0, lowerCAmelCase__=1, lowerCAmelCase__=2, **lowerCAmelCase__, ) -> str: super().__init__(**lowerCAmelCase__, pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__) snake_case_ = hidden_size snake_case_ = feat_extract_norm snake_case_ = feat_extract_activation snake_case_ = list(lowerCAmelCase__) snake_case_ = list(lowerCAmelCase__) snake_case_ = list(lowerCAmelCase__) snake_case_ = conv_bias snake_case_ = num_conv_pos_embeddings snake_case_ = num_conv_pos_embedding_groups snake_case_ = len(self.conv_dim) snake_case_ = num_hidden_layers snake_case_ = intermediate_size snake_case_ = squeeze_factor snake_case_ = hidden_act snake_case_ = num_attention_heads snake_case_ = hidden_dropout snake_case_ = attention_dropout snake_case_ = activation_dropout snake_case_ = feat_proj_dropout snake_case_ = final_dropout snake_case_ = layerdrop snake_case_ = layer_norm_eps snake_case_ = initializer_range snake_case_ = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' f'but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)' f'= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.') # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 snake_case_ = apply_spec_augment snake_case_ = mask_time_prob snake_case_ = mask_time_length snake_case_ = mask_time_min_masks snake_case_ = mask_feature_prob snake_case_ = mask_feature_length snake_case_ = mask_feature_min_masks # ctc loss snake_case_ = ctc_loss_reduction snake_case_ = ctc_zero_infinity # sequence classification snake_case_ = use_weighted_layer_sum snake_case_ = classifier_proj_size @property def a_ ( self) -> Optional[Any]: return functools.reduce(operator.mul, self.conv_stride, 1)
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = False ) -> bool: if n == 2: return True if not n % 2 or n < 2: return False if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit return False if n > 3317044064679887385961981 and not allow_probable: raise ValueError( 'Warning: upper bound of deterministic test is exceeded. ' 'Pass allow_probable=True to allow probabilistic test. ' 'A return value of True indicates a probable prime.' ) # array bounds provided by analysis snake_case_ = [ 2047, 1373653, 25326001, 3215031751, 2152302898747, 3474749660383, 341550071728321, 1, 3825123056546413051, 1, 1, 318665857834031151167461, 3317044064679887385961981, ] snake_case_ = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41] for idx, _p in enumerate(UpperCAmelCase , 1 ): if n < _p: # then we have our last prime to check snake_case_ = primes[:idx] break snake_case_ , snake_case_ = n - 1, 0 # break up n -1 into a power of 2 (s) and # remaining odd component # essentially, solve for d * 2 ** s == n - 1 while d % 2 == 0: d //= 2 s += 1 for prime in plist: snake_case_ = False for r in range(UpperCAmelCase ): snake_case_ = pow(UpperCAmelCase , d * 2**r , UpperCAmelCase ) # see article for analysis explanation for m if (r == 0 and m == 1) or ((m + 1) % n == 0): snake_case_ = True # this loop will not determine compositeness break if pr: continue # if pr is False, then the above loop never evaluated to true, # and the n MUST be composite return False return True def UpperCAmelCase ( ) -> None: assert not miller_rabin(561 ) assert miller_rabin(563 ) # 2047 assert not miller_rabin(838201 ) assert miller_rabin(838207 ) # 1_373_653 assert not miller_rabin(17316001 ) assert miller_rabin(17316017 ) # 25_326_001 assert not miller_rabin(3078386641 ) assert miller_rabin(3078386653 ) # 3_215_031_751 assert not miller_rabin(1713045574801 ) assert miller_rabin(1713045574819 ) # 2_152_302_898_747 assert not miller_rabin(2779799728307 ) assert miller_rabin(2779799728327 ) # 3_474_749_660_383 assert not miller_rabin(113850023909441 ) assert miller_rabin(113850023909527 ) # 341_550_071_728_321 assert not miller_rabin(1275041018848804351 ) assert miller_rabin(1275041018848804391 ) # 3_825_123_056_546_413_051 assert not miller_rabin(79666464458507787791867 ) assert miller_rabin(79666464458507787791951 ) # 318_665_857_834_031_151_167_461 assert not miller_rabin(552840677446647897660333 ) assert miller_rabin(552840677446647897660359 ) # 3_317_044_064_679_887_385_961_981 # upper limit for probabilistic test if __name__ == "__main__": test_miller_rabin()
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''', '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''', } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "markuplm" def __init__( self, lowerCAmelCase__=3_0522, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=3072, lowerCAmelCase__="gelu", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=512, lowerCAmelCase__=2, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=0, lowerCAmelCase__=0, lowerCAmelCase__=2, lowerCAmelCase__=256, lowerCAmelCase__=1024, lowerCAmelCase__=216, lowerCAmelCase__=1001, lowerCAmelCase__=32, lowerCAmelCase__=50, lowerCAmelCase__="absolute", lowerCAmelCase__=True, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> int: super().__init__( pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = hidden_act snake_case_ = intermediate_size snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = position_embedding_type snake_case_ = use_cache snake_case_ = classifier_dropout # additional properties snake_case_ = max_depth snake_case_ = max_xpath_tag_unit_embeddings snake_case_ = max_xpath_subs_unit_embeddings snake_case_ = tag_pad_id snake_case_ = subs_pad_id snake_case_ = xpath_unit_hidden_size
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __UpperCamelCase = { '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ResNetForImageClassification''', '''ResNetModel''', '''ResNetPreTrainedModel''', '''ResNetBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFResNetForImageClassification''', '''TFResNetModel''', '''TFResNetPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''FlaxResNetForImageClassification''', '''FlaxResNetModel''', '''FlaxResNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
"""simple docstring""" from __future__ import annotations from collections import deque class UpperCamelCase : def __init__( self, lowerCAmelCase__) -> Union[str, Any]: snake_case_ = [] self.adlist.append( {'value': '', 'next_states': [], 'fail_state': 0, 'output': []}) for keyword in keywords: self.add_keyword(lowerCAmelCase__) self.set_fail_transitions() def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> int | None: for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = 0 for character in keyword: snake_case_ = self.find_next_state(lowerCAmelCase__, lowerCAmelCase__) if next_state is None: self.adlist.append( { 'value': character, 'next_states': [], 'fail_state': 0, 'output': [], }) self.adlist[current_state]["next_states"].append(len(self.adlist) - 1) snake_case_ = len(self.adlist) - 1 else: snake_case_ = next_state self.adlist[current_state]["output"].append(lowerCAmelCase__) def a_ ( self) -> None: snake_case_ = deque() for node in self.adlist[0]["next_states"]: q.append(lowerCAmelCase__) snake_case_ = 0 while q: snake_case_ = q.popleft() for child in self.adlist[r]["next_states"]: q.append(lowerCAmelCase__) snake_case_ = self.adlist[r]['fail_state'] while ( self.find_next_state(lowerCAmelCase__, self.adlist[child]['value']) is None and state != 0 ): snake_case_ = self.adlist[state]['fail_state'] snake_case_ = self.find_next_state( lowerCAmelCase__, self.adlist[child]['value']) if self.adlist[child]["fail_state"] is None: snake_case_ = 0 snake_case_ = ( self.adlist[child]['output'] + self.adlist[self.adlist[child]['fail_state']]['output'] ) def a_ ( self, lowerCAmelCase__) -> dict[str, list[int]]: snake_case_ = {} # returns a dict with keywords and list of its occurrences snake_case_ = 0 for i in range(len(lowerCAmelCase__)): while ( self.find_next_state(lowerCAmelCase__, string[i]) is None and current_state != 0 ): snake_case_ = self.adlist[current_state]['fail_state'] snake_case_ = self.find_next_state(lowerCAmelCase__, string[i]) if next_state is None: snake_case_ = 0 else: snake_case_ = next_state for key in self.adlist[current_state]["output"]: if key not in result: snake_case_ = [] result[key].append(i - len(lowerCAmelCase__) + 1) return result if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: __UpperCamelCase = None __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase = { '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase = { '''facebook/nllb-large-en-ro''': 1024, '''facebook/nllb-200-distilled-600M''': 1024, } # fmt: off __UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', 
'''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = NllbTokenizer SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=False, **lowerCAmelCase__, ) -> List[str]: # Mask token behave like a normal word, i.e. include the space before it snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token snake_case_ = legacy_behaviour super().__init__( vocab_file=lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, src_lang=lowerCAmelCase__, tgt_lang=lowerCAmelCase__, additional_special_tokens=lowerCAmelCase__, legacy_behaviour=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = vocab_file snake_case_ = False if not self.vocab_file else True snake_case_ = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens}) snake_case_ = { lang_code: self.convert_tokens_to_ids(lowerCAmelCase__) for lang_code in FAIRSEQ_LANGUAGE_CODES } snake_case_ = src_lang if src_lang is not None else 'eng_Latn' snake_case_ = self.convert_tokens_to_ids(self._src_lang) snake_case_ = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def a_ ( self) -> str: return self._src_lang @src_lang.setter def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> str: if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model') snake_case_ = src_lang snake_case_ = self(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__) snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__) snake_case_ = tgt_lang_id return inputs def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = "eng_Latn", lowerCAmelCase__ = None, lowerCAmelCase__ = "fra_Latn", **lowerCAmelCase__, ) -> BatchEncoding: snake_case_ = src_lang snake_case_ = tgt_lang return super().prepare_seqaseq_batch(lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self) -> List[Any]: return self.set_src_lang_special_tokens(self.src_lang) def a_ ( self) -> Tuple: return self.set_tgt_lang_special_tokens(self.tgt_lang) def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__) if self.legacy_behaviour: snake_case_ = [] snake_case_ = [self.eos_token_id, self.cur_lang_code] else: snake_case_ = [self.cur_lang_code] snake_case_ = [self.eos_token_id] snake_case_ = self.convert_ids_to_tokens(self.prefix_tokens) snake_case_ = self.convert_ids_to_tokens(self.suffix_tokens) snake_case_ = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), ) def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__) if self.legacy_behaviour: snake_case_ = [] snake_case_ = [self.eos_token_id, self.cur_lang_code] else: snake_case_ = [self.cur_lang_code] snake_case_ = [self.eos_token_id] snake_case_ = self.convert_ids_to_tokens(self.prefix_tokens) snake_case_ = self.convert_ids_to_tokens(self.suffix_tokens) snake_case_ = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + 
self.suffix_tokens)), ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(lowerCAmelCase__): logger.error(f'Vocabulary path ({save_directory}) should be a directory.') return snake_case_ = os.path.join( lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__): copyfile(self.vocab_file, lowerCAmelCase__) return (out_vocab_file,)
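# Illustrative usage sketch (our addition): translation-style encoding with the
# public checkpoint referenced above. Assumes network access and the released
# transformers API, where this fast tokenizer is exposed as NllbTokenizerFast
# (the class name is mangled in this dump).
from transformers import NllbTokenizerFast

tok = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
enc = tok("Hello world", text_target="Bonjour le monde", return_tensors="pt")
# the source ids carry the language code plus </s> as special tokens
print(tok.convert_ids_to_tokens(enc["input_ids"][0]))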
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = 42 class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): @register_to_config def __init__( self, lowerCAmelCase__ = 6_5536, lowerCAmelCase__ = None, lowerCAmelCase__ = 2, lowerCAmelCase__ = 2, lowerCAmelCase__ = 0, lowerCAmelCase__ = "fourier", lowerCAmelCase__ = True, lowerCAmelCase__ = False, lowerCAmelCase__ = 0.0, lowerCAmelCase__ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), lowerCAmelCase__ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), lowerCAmelCase__ = "UNetMidBlock1D", lowerCAmelCase__ = None, lowerCAmelCase__ = (32, 32, 64), lowerCAmelCase__ = None, lowerCAmelCase__ = 8, lowerCAmelCase__ = 1, lowerCAmelCase__ = False, ) -> Union[str, Any]: super().__init__() snake_case_ = sample_size # time if time_embedding_type == "fourier": snake_case_ = GaussianFourierProjection( embedding_size=8, set_W_to_weight=lowerCAmelCase__, log=lowerCAmelCase__, flip_sin_to_cos=lowerCAmelCase__) snake_case_ = 2 * block_out_channels[0] elif time_embedding_type == "positional": snake_case_ = Timesteps( block_out_channels[0], flip_sin_to_cos=lowerCAmelCase__, downscale_freq_shift=lowerCAmelCase__) snake_case_ = block_out_channels[0] if use_timestep_embedding: snake_case_ = block_out_channels[0] * 4 snake_case_ = TimestepEmbedding( in_channels=lowerCAmelCase__, time_embed_dim=lowerCAmelCase__, act_fn=lowerCAmelCase__, out_dim=block_out_channels[0], ) snake_case_ = nn.ModuleList([]) snake_case_ = None snake_case_ = nn.ModuleList([]) snake_case_ = None # down snake_case_ = in_channels for i, down_block_type in enumerate(lowerCAmelCase__): snake_case_ = output_channel snake_case_ = block_out_channels[i] if i == 0: input_channel += extra_in_channels snake_case_ = i == len(lowerCAmelCase__) - 1 snake_case_ = get_down_block( lowerCAmelCase__, num_layers=lowerCAmelCase__, in_channels=lowerCAmelCase__, out_channels=lowerCAmelCase__, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, ) self.down_blocks.append(lowerCAmelCase__) # mid snake_case_ = get_mid_block( lowerCAmelCase__, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=lowerCAmelCase__, add_downsample=lowerCAmelCase__, ) # up snake_case_ = list(reversed(lowerCAmelCase__)) snake_case_ = reversed_block_out_channels[0] if out_block_type is None: snake_case_ = out_channels else: snake_case_ = block_out_channels[0] for i, up_block_type in enumerate(lowerCAmelCase__): snake_case_ = output_channel snake_case_ = ( reversed_block_out_channels[i + 1] if i < len(lowerCAmelCase__) - 1 else final_upsample_channels ) snake_case_ = i == len(lowerCAmelCase__) - 1 snake_case_ = get_up_block( lowerCAmelCase__, num_layers=lowerCAmelCase__, in_channels=lowerCAmelCase__, out_channels=lowerCAmelCase__, temb_channels=block_out_channels[0], add_upsample=not is_final_block, ) self.up_blocks.append(lowerCAmelCase__) snake_case_ = output_channel # out snake_case_ = norm_num_groups if norm_num_groups is 
not None else min(block_out_channels[0] // 4, 32) snake_case_ = get_out_block( out_block_type=lowerCAmelCase__, num_groups_out=lowerCAmelCase__, embed_dim=block_out_channels[0], out_channels=lowerCAmelCase__, act_fn=lowerCAmelCase__, fc_dim=block_out_channels[-1] // 4, ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = True, ) -> Union[UNetaDOutput, Tuple]: snake_case_ = timestep if not torch.is_tensor(lowerCAmelCase__): snake_case_ = torch.tensor([timesteps], dtype=torch.long, device=sample.device) elif torch.is_tensor(lowerCAmelCase__) and len(timesteps.shape) == 0: snake_case_ = timesteps[None].to(sample.device) snake_case_ = self.time_proj(lowerCAmelCase__) if self.config.use_timestep_embedding: snake_case_ = self.time_mlp(lowerCAmelCase__) else: snake_case_ = timestep_embed[..., None] snake_case_ = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) snake_case_ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:])) # 2. down snake_case_ = () for downsample_block in self.down_blocks: snake_case_ , snake_case_ = downsample_block(hidden_states=lowerCAmelCase__, temb=lowerCAmelCase__) down_block_res_samples += res_samples # 3. mid if self.mid_block: snake_case_ = self.mid_block(lowerCAmelCase__, lowerCAmelCase__) # 4. up for i, upsample_block in enumerate(self.up_blocks): snake_case_ = down_block_res_samples[-1:] snake_case_ = down_block_res_samples[:-1] snake_case_ = upsample_block(lowerCAmelCase__, res_hidden_states_tuple=lowerCAmelCase__, temb=lowerCAmelCase__) # 5. post-process if self.out_block: snake_case_ = self.out_block(lowerCAmelCase__, lowerCAmelCase__) if not return_dict: return (sample,) return UNetaDOutput(sample=lowerCAmelCase__)
"""simple docstring""" from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = '''T5Config''' class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "mt5" SCREAMING_SNAKE_CASE_ = MTaConfig class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "mt5" SCREAMING_SNAKE_CASE_ = MTaConfig class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "mt5" SCREAMING_SNAKE_CASE_ = MTaConfig
"""simple docstring""" from scipy.stats import pearsonr import datasets __UpperCamelCase = ''' Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. ''' __UpperCamelCase = ''' Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. >>> pearsonr_metric = datasets.load_metric("pearsonr") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results[\'pearsonr\'], 2)) -0.74 Example 2-The same as Example 1, but that also returns the `p-value`. >>> pearsonr_metric = datasets.load_metric("pearsonr") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) [\'p-value\', \'pearsonr\'] >>> print(round(results[\'pearsonr\'], 2)) -0.74 >>> print(round(results[\'p-value\'], 2)) 0.15 ''' __UpperCamelCase = ''' @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase ( datasets.Metric ): def a_ ( self) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { 'predictions': datasets.Value('float'), 'references': datasets.Value('float'), }), reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'], ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=False) -> List[Any]: if return_pvalue: snake_case_ = pearsonr(lowerCAmelCase__, lowerCAmelCase__) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(lowerCAmelCase__, lowerCAmelCase__)[0])}
"""simple docstring""" import argparse __UpperCamelCase = '''docs/source/_static/js/custom.js''' def UpperCAmelCase ( UpperCAmelCase ) -> int: with open(UpperCAmelCase , encoding='utf-8' , newline='\n' ) as f: snake_case_ = f.readlines() snake_case_ = 0 # First let's put the right version while not lines[index].startswith('const stableVersion =' ): index += 1 snake_case_ = f'const stableVersion = "v{version}"\n' # Then update the dictionary while not lines[index].startswith('const versionMapping = {' ): index += 1 # We go until the end while not lines[index].startswith('}' ): index += 1 # We add the new version at the end lines[index - 1] += f' "v{version}": "v{version}",\n' with open(UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(UpperCAmelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--version''', help='''Release version.''') __UpperCamelCase = parser.parse_args() update_custom_js(args.version)
"""simple docstring""" import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin __UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''') class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = BartphoTokenizer SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = True def a_ ( self) -> int: super().setUp() snake_case_ = ['▁This', '▁is', '▁a', '▁t', 'est'] snake_case_ = dict(zip(lowerCAmelCase__, range(len(lowerCAmelCase__)))) snake_case_ = {'unk_token': '<unk>'} snake_case_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['monolingual_vocab_file']) with open(self.monolingual_vocab_file, 'w', encoding='utf-8') as fp: for token in vocab_tokens: fp.write(f'{token} {vocab_tokens[token]}\n') snake_case_ = BartphoTokenizer(lowerCAmelCase__, self.monolingual_vocab_file, **self.special_tokens_map) tokenizer.save_pretrained(self.tmpdirname) def a_ ( self, **lowerCAmelCase__) -> Union[str, Any]: kwargs.update(self.special_tokens_map) return BartphoTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__) -> Union[str, Any]: snake_case_ = 'This is a là test' snake_case_ = 'This is a<unk><unk> test' return input_text, output_text def a_ ( self) -> Tuple: snake_case_ = BartphoTokenizer(lowerCAmelCase__, self.monolingual_vocab_file, **self.special_tokens_map) snake_case_ = 'This is a là test' snake_case_ = '▁This ▁is ▁a ▁l à ▁t est'.split() snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokens + [tokenizer.unk_token] snake_case_ = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__), lowerCAmelCase__)
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Any class UpperCamelCase : def __init__( self, lowerCAmelCase__) -> Optional[int]: snake_case_ = data snake_case_ = None class UpperCamelCase : def __init__( self) -> Dict: snake_case_ = None snake_case_ = None def __iter__( self) -> Iterator[Any]: snake_case_ = self.head while self.head: yield node.data snake_case_ = node.next if node == self.head: break def __len__( self) -> int: return sum(1 for _ in self) def __repr__( self) -> str: return "->".join(str(lowerCAmelCase__) for item in iter(self)) def a_ ( self, lowerCAmelCase__) -> None: self.insert_nth(len(self), lowerCAmelCase__) def a_ ( self, lowerCAmelCase__) -> None: self.insert_nth(0, lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> None: if index < 0 or index > len(self): raise IndexError('list index out of range.') snake_case_ = Node(lowerCAmelCase__) if self.head is None: snake_case_ = new_node # first node points itself snake_case_ = snake_case_ = new_node elif index == 0: # insert at head snake_case_ = self.head snake_case_ = snake_case_ = new_node else: snake_case_ = self.head for _ in range(index - 1): snake_case_ = temp.next snake_case_ = temp.next snake_case_ = new_node if index == len(self) - 1: # insert at tail snake_case_ = new_node def a_ ( self) -> str: return self.delete_nth(0) def a_ ( self) -> Any: return self.delete_nth(len(self) - 1) def a_ ( self, lowerCAmelCase__ = 0) -> Any: if not 0 <= index < len(self): raise IndexError('list index out of range.') snake_case_ = self.head if self.head == self.tail: # just one node snake_case_ = snake_case_ = None elif index == 0: # delete head node snake_case_ = self.tail.next.next snake_case_ = self.head.next else: snake_case_ = self.head for _ in range(index - 1): snake_case_ = temp.next snake_case_ = temp.next snake_case_ = temp.next.next if index == len(self) - 1: # delete at tail snake_case_ = temp return delete_node.data def a_ ( self) -> bool: return len(self) == 0 def UpperCAmelCase ( ) -> None: snake_case_ = CircularLinkedList() assert len(UpperCAmelCase ) == 0 assert circular_linked_list.is_empty() is True assert str(UpperCAmelCase ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(UpperCAmelCase ) == i circular_linked_list.insert_nth(UpperCAmelCase , i + 1 ) assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) ) circular_linked_list.insert_tail(6 ) assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 7 ) ) circular_linked_list.insert_head(0 ) assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(0 , 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 , 3 ) assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) ) 
assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def UpperCAmelCase ( UpperCAmelCase ) -> Dict: snake_case_ = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor', 'decoder.output_projection.weight', ] for k in ignore_keys: state_dict.pop(UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase ) -> Dict: snake_case_ , snake_case_ = emb.weight.shape snake_case_ = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase ) snake_case_ = emb.weight.data return lin_layer def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase="facebook/mbart-large-en-ro" , UpperCAmelCase=False , UpperCAmelCase=False ) -> Optional[int]: snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )['model'] remove_ignore_keys_(UpperCAmelCase ) snake_case_ = state_dict['encoder.embed_tokens.weight'].shape[0] snake_case_ = MBartConfig.from_pretrained(UpperCAmelCase , vocab_size=UpperCAmelCase ) if mbart_aa and finetuned: snake_case_ = 'relu' snake_case_ = state_dict['decoder.embed_tokens.weight'] snake_case_ = MBartForConditionalGeneration(UpperCAmelCase ) model.model.load_state_dict(UpperCAmelCase ) if finetuned: snake_case_ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') __UpperCamelCase = parser.parse_args() __UpperCamelCase = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring""" import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __UpperCamelCase = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. __UpperCamelCase = direct_transformers_import(PATH_TO_TRANSFORMERS) __UpperCamelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` __UpperCamelCase = re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''') __UpperCamelCase = { '''DecisionTransformerConfig''', '''EncoderDecoderConfig''', '''MusicgenConfig''', '''RagConfig''', '''SpeechEncoderDecoderConfig''', '''TimmBackboneConfig''', '''VisionEncoderDecoderConfig''', '''VisionTextDualEncoderConfig''', '''LlamaConfig''', } def UpperCAmelCase ( UpperCAmelCase ) -> List[Any]: snake_case_ = None # source code of `config_class` snake_case_ = inspect.getsource(UpperCAmelCase ) snake_case_ = _re_checkpoint.findall(UpperCAmelCase ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith('/' ): snake_case_ = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link snake_case_ = f'https://huggingface.co/{ckpt_name}' if ckpt_link == ckpt_link_from_name: snake_case_ = ckpt_name break return checkpoint def UpperCAmelCase ( ) -> Union[str, Any]: snake_case_ = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue snake_case_ = get_checkpoint_from_config_class(UpperCAmelCase ) snake_case_ = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(UpperCAmelCase ) if len(UpperCAmelCase ) > 0: snake_case_ = '\n'.join(sorted(UpperCAmelCase ) ) raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''WavLMForAudioFrameClassification''', '''WavLMForCTC''', '''WavLMForSequenceClassification''', '''WavLMForXVector''', '''WavLMModel''', '''WavLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavlm import ( WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST, WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, WavLMForXVector, WavLMModel, WavLMPreTrainedModel, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean __UpperCamelCase = 0 __UpperCamelCase = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __UpperCamelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right __UpperCamelCase = tuple[int, int] class UpperCamelCase : def __init__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> None: snake_case_ = pos_x snake_case_ = pos_y snake_case_ = (pos_y, pos_x) snake_case_ = goal_x snake_case_ = goal_y snake_case_ = g_cost snake_case_ = parent snake_case_ = self.calculate_heuristic() snake_case_ = self.g_cost + self.h_cost def a_ ( self) -> float: snake_case_ = self.pos_x - self.goal_x snake_case_ = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(lowerCAmelCase__) + abs(lowerCAmelCase__) else: return sqrt(dy**2 + dx**2) def __lt__( self, lowerCAmelCase__) -> bool: return self.f_cost < other.f_cost class UpperCamelCase : def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> Union[str, Any]: snake_case_ = Node(start[1], start[0], goal[1], goal[0], 0, lowerCAmelCase__) snake_case_ = Node(goal[1], goal[0], goal[1], goal[0], 9_9999, lowerCAmelCase__) snake_case_ = [self.start] snake_case_ = [] snake_case_ = False def a_ ( self) -> list[TPosition]: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() snake_case_ = self.open_nodes.pop(0) if current_node.pos == self.target.pos: return self.retrace_path(lowerCAmelCase__) self.closed_nodes.append(lowerCAmelCase__) snake_case_ = self.get_successors(lowerCAmelCase__) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowerCAmelCase__) else: # retrieve the best current path snake_case_ = self.open_nodes.pop(self.open_nodes.index(lowerCAmelCase__)) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowerCAmelCase__) else: self.open_nodes.append(lowerCAmelCase__) return [self.start.pos] def a_ ( self, lowerCAmelCase__) -> list[Node]: snake_case_ = [] for action in delta: snake_case_ = parent.pos_x + action[1] snake_case_ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(lowerCAmelCase__) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowerCAmelCase__, lowerCAmelCase__, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, lowerCAmelCase__, )) return successors def a_ ( self, lowerCAmelCase__) -> list[TPosition]: snake_case_ = node snake_case_ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x)) snake_case_ = current_node.parent path.reverse() return path class UpperCamelCase : def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> None: snake_case_ = AStar(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = AStar(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = False def a_ ( self) -> list[TPosition]: while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() snake_case_ = self.fwd_astar.open_nodes.pop(0) snake_case_ = self.bwd_astar.open_nodes.pop(0) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( lowerCAmelCase__, lowerCAmelCase__) 
self.fwd_astar.closed_nodes.append(lowerCAmelCase__) self.bwd_astar.closed_nodes.append(lowerCAmelCase__) snake_case_ = current_bwd_node snake_case_ = current_fwd_node snake_case_ = { self.fwd_astar: self.fwd_astar.get_successors(lowerCAmelCase__), self.bwd_astar: self.bwd_astar.get_successors(lowerCAmelCase__), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(lowerCAmelCase__) else: # retrieve the best current path snake_case_ = astar.open_nodes.pop( astar.open_nodes.index(lowerCAmelCase__)) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(lowerCAmelCase__) else: astar.open_nodes.append(lowerCAmelCase__) return [self.fwd_astar.start.pos] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> list[TPosition]: snake_case_ = self.fwd_astar.retrace_path(lowerCAmelCase__) snake_case_ = self.bwd_astar.retrace_path(lowerCAmelCase__) bwd_path.pop() bwd_path.reverse() snake_case_ = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] __UpperCamelCase = (0, 0) __UpperCamelCase = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __UpperCamelCase = time.time() __UpperCamelCase = AStar(init, goal) __UpperCamelCase = a_star.search() __UpperCamelCase = time.time() - start_time print(F"""AStar execution time = {end_time:f} seconds""") __UpperCamelCase = time.time() __UpperCamelCase = BidirectionalAStar(init, goal) __UpperCamelCase = time.time() - bd_start_time print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
"""simple docstring""" import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all BART models at https://huggingface.co/models?filter=bart __UpperCamelCase = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, } __UpperCamelCase = { '''facebook/bart-base''': 1024, '''facebook/bart-large''': 1024, '''facebook/bart-large-mnli''': 1024, '''facebook/bart-large-cnn''': 1024, '''facebook/bart-large-xsum''': 1024, '''yjernite/bart_eli5''': 1024, } @lru_cache() def UpperCAmelCase ( ) -> Dict: snake_case_ = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) snake_case_ = bs[:] snake_case_ = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCAmelCase ) cs.append(2**8 + n ) n += 1 snake_case_ = [chr(UpperCAmelCase ) for n in cs] return dict(zip(UpperCAmelCase , UpperCAmelCase ) ) def UpperCAmelCase ( UpperCAmelCase ) -> int: snake_case_ = set() snake_case_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) snake_case_ = char return pairs class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] def __init__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, **lowerCAmelCase__, ) -> Union[str, Any]: snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else bos_token snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else eos_token snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else sep_token snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else cls_token snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else unk_token snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token super().__init__( errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, **lowerCAmelCase__, ) with open(lowerCAmelCase__, encoding='utf-8') as vocab_handle: snake_case_ = json.load(lowerCAmelCase__) snake_case_ = {v: k for k, v in self.encoder.items()} snake_case_ = errors # how to handle errors in decoding snake_case_ = bytes_to_unicode() snake_case_ = {v: k for k, v in self.byte_encoder.items()} with open(lowerCAmelCase__, encoding='utf-8') as merges_handle: snake_case_ = merges_handle.read().split('\n')[1:-1] snake_case_ = [tuple(merge.split()) for merge in bpe_merges] snake_case_ = dict(zip(lowerCAmelCase__, range(len(lowerCAmelCase__)))) snake_case_ = {} snake_case_ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions snake_case_ = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+') @property def a_ ( self) -> Optional[int]: return len(self.encoder) def a_ ( self) -> Optional[Any]: return dict(self.encoder, **self.added_tokens_encoder) def a_ ( self, lowerCAmelCase__) -> Union[str, Any]: if token in self.cache: return self.cache[token] snake_case_ = tuple(lowerCAmelCase__) snake_case_ = get_pairs(lowerCAmelCase__) if not pairs: return token while True: snake_case_ = min(lowerCAmelCase__, key=lambda lowerCAmelCase__: self.bpe_ranks.get(lowerCAmelCase__, float('inf'))) if bigram not in self.bpe_ranks: break snake_case_ , snake_case_ = bigram snake_case_ = [] snake_case_ = 0 while i < len(lowerCAmelCase__): try: snake_case_ = word.index(lowerCAmelCase__, lowerCAmelCase__) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) snake_case_ = j if word[i] == first and i < len(lowerCAmelCase__) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 snake_case_ = tuple(lowerCAmelCase__) snake_case_ = new_word if len(lowerCAmelCase__) == 1: break else: snake_case_ = get_pairs(lowerCAmelCase__) snake_case_ = ' '.join(lowerCAmelCase__) snake_case_ = word return word def a_ ( self, lowerCAmelCase__) -> Dict: snake_case_ = [] for token in re.findall(self.pat, lowerCAmelCase__): snake_case_ = ''.join( self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__).split(' ')) return bpe_tokens def a_ ( self, lowerCAmelCase__) -> List[str]: return self.encoder.get(lowerCAmelCase__, self.encoder.get(self.unk_token)) def a_ ( self, lowerCAmelCase__) -> int: return self.decoder.get(lowerCAmelCase__) def a_ ( self, lowerCAmelCase__) -> Optional[int]: snake_case_ = ''.join(lowerCAmelCase__) snake_case_ = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors) return text def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase__): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return snake_case_ = os.path.join( lowerCAmelCase__, (filename_prefix + '-' if filename_prefix 
else '') + VOCAB_FILES_NAMES['vocab_file']) snake_case_ = os.path.join( lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(lowerCAmelCase__, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=lowerCAmelCase__, ensure_ascii=lowerCAmelCase__) + '\n') snake_case_ = 0 with open(lowerCAmelCase__, 'w', encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda lowerCAmelCase__: kv[1]): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ' Please check that the tokenizer is not corrupted!') snake_case_ = token_index writer.write(' '.join(lowerCAmelCase__) + '\n') index += 1 return vocab_file, merge_file def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase__, token_ids_a=lowerCAmelCase__, already_has_special_tokens=lowerCAmelCase__) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase__)) + [1] return [1] + ([0] * len(lowerCAmelCase__)) + [1, 1] + ([0] * len(lowerCAmelCase__)) + [1] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=False, **lowerCAmelCase__) -> int: snake_case_ = kwargs.pop('add_prefix_space', self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__) > 0 and not text[0].isspace()): snake_case_ = ' ' + text return (text, kwargs)
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int: while a != 0: snake_case_ , snake_case_ = b % a, a return b def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int: if gcd(UpperCAmelCase , UpperCAmelCase ) != 1: snake_case_ = f'mod inverse of {a!r} and {m!r} does not exist' raise ValueError(UpperCAmelCase ) snake_case_ , snake_case_ , snake_case_ = 1, 0, a snake_case_ , snake_case_ , snake_case_ = 0, 1, m while va != 0: snake_case_ = ua // va snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
"""simple docstring""" import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> List[str]: snake_case_ = tempfile.mkdtemp() snake_case_ = SamImageProcessor() snake_case_ = SamProcessor(lowerCAmelCase__) processor.save_pretrained(self.tmpdirname) def a_ ( self, **lowerCAmelCase__) -> List[Any]: return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCAmelCase__).image_processor def a_ ( self) -> int: shutil.rmtree(self.tmpdirname) def a_ ( self) -> Optional[int]: snake_case_ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)] snake_case_ = [Image.fromarray(np.moveaxis(lowerCAmelCase__, 0, -1)) for x in image_inputs] return image_inputs def a_ ( self) -> Union[str, Any]: snake_case_ = SamProcessor(image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) snake_case_ = self.get_image_processor(do_normalize=lowerCAmelCase__, padding_value=1.0) snake_case_ = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=lowerCAmelCase__, padding_value=1.0) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, lowerCAmelCase__) def a_ ( self) -> int: snake_case_ = self.get_image_processor() snake_case_ = SamProcessor(image_processor=lowerCAmelCase__) snake_case_ = self.prepare_image_inputs() snake_case_ = image_processor(lowerCAmelCase__, return_tensors='np') snake_case_ = processor(images=lowerCAmelCase__, return_tensors='np') input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes') # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) @require_torch def a_ ( self) -> List[Any]: snake_case_ = self.get_image_processor() snake_case_ = SamProcessor(image_processor=lowerCAmelCase__) snake_case_ = [torch.ones((1, 3, 5, 5))] snake_case_ = [[1764, 2646]] snake_case_ = [[683, 1024]] snake_case_ = processor.post_process_masks(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) snake_case_ = processor.post_process_masks( lowerCAmelCase__, torch.tensor(lowerCAmelCase__), torch.tensor(lowerCAmelCase__)) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) # should also work with np snake_case_ = [np.ones((1, 3, 5, 5))] snake_case_ = processor.post_process_masks(lowerCAmelCase__, np.array(lowerCAmelCase__), np.array(lowerCAmelCase__)) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) snake_case_ = [[1, 0], [0, 1]] with self.assertRaises(lowerCAmelCase__): snake_case_ = processor.post_process_masks(lowerCAmelCase__, np.array(lowerCAmelCase__), np.array(lowerCAmelCase__)) @require_vision @require_tf class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> str: snake_case_ = tempfile.mkdtemp() snake_case_ = SamImageProcessor() snake_case_ = SamProcessor(lowerCAmelCase__) 
processor.save_pretrained(self.tmpdirname) def a_ ( self, **lowerCAmelCase__) -> Union[str, Any]: return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCAmelCase__).image_processor def a_ ( self) -> Tuple: shutil.rmtree(self.tmpdirname) def a_ ( self) -> Dict: snake_case_ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)] snake_case_ = [Image.fromarray(np.moveaxis(lowerCAmelCase__, 0, -1)) for x in image_inputs] return image_inputs def a_ ( self) -> Optional[int]: snake_case_ = SamProcessor(image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) snake_case_ = self.get_image_processor(do_normalize=lowerCAmelCase__, padding_value=1.0) snake_case_ = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=lowerCAmelCase__, padding_value=1.0) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, lowerCAmelCase__) def a_ ( self) -> Optional[Any]: snake_case_ = self.get_image_processor() snake_case_ = SamProcessor(image_processor=lowerCAmelCase__) snake_case_ = self.prepare_image_inputs() snake_case_ = image_processor(lowerCAmelCase__, return_tensors='np') snake_case_ = processor(images=lowerCAmelCase__, return_tensors='np') input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) @require_tf def a_ ( self) -> int: snake_case_ = self.get_image_processor() snake_case_ = SamProcessor(image_processor=lowerCAmelCase__) snake_case_ = [tf.ones((1, 3, 5, 5))] snake_case_ = [[1764, 2646]] snake_case_ = [[683, 1024]] snake_case_ = processor.post_process_masks(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, return_tensors='tf') self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) snake_case_ = processor.post_process_masks( lowerCAmelCase__, tf.convert_to_tensor(lowerCAmelCase__), tf.convert_to_tensor(lowerCAmelCase__), return_tensors='tf', ) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) # should also work with np snake_case_ = [np.ones((1, 3, 5, 5))] snake_case_ = processor.post_process_masks( lowerCAmelCase__, np.array(lowerCAmelCase__), np.array(lowerCAmelCase__), return_tensors='tf') self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) snake_case_ = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError): snake_case_ = processor.post_process_masks( lowerCAmelCase__, np.array(lowerCAmelCase__), np.array(lowerCAmelCase__), return_tensors='tf') @require_vision @require_torchvision class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> List[Any]: snake_case_ = tempfile.mkdtemp() snake_case_ = SamImageProcessor() snake_case_ = SamProcessor(lowerCAmelCase__) processor.save_pretrained(self.tmpdirname) def a_ ( self, **lowerCAmelCase__) -> Optional[int]: return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCAmelCase__).image_processor def a_ ( self) -> int: shutil.rmtree(self.tmpdirname) def a_ ( self) -> Optional[Any]: snake_case_ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)] snake_case_ = [Image.fromarray(np.moveaxis(lowerCAmelCase__, 0, -1)) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def a_ ( self) -> str: snake_case_ = self.get_image_processor() snake_case_ = SamProcessor(image_processor=lowerCAmelCase__) 
snake_case_ = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.floataa) snake_case_ = [tf.convert_to_tensor(lowerCAmelCase__)] snake_case_ = [torch.tensor(lowerCAmelCase__)] snake_case_ = [[1764, 2646]] snake_case_ = [[683, 1024]] snake_case_ = processor.post_process_masks( lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, return_tensors='tf') snake_case_ = processor.post_process_masks( lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, return_tensors='pt') self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy())) @is_pt_tf_cross_test def a_ ( self) -> List[str]: snake_case_ = self.get_image_processor() snake_case_ = SamProcessor(image_processor=lowerCAmelCase__) snake_case_ = self.prepare_image_inputs() snake_case_ = image_processor(lowerCAmelCase__, return_tensors='pt')['pixel_values'].numpy() snake_case_ = processor(images=lowerCAmelCase__, return_tensors='pt')['pixel_values'].numpy() snake_case_ = image_processor(lowerCAmelCase__, return_tensors='tf')['pixel_values'].numpy() snake_case_ = processor(images=lowerCAmelCase__, return_tensors='tf')['pixel_values'].numpy() self.assertTrue(np.allclose(lowerCAmelCase__, lowerCAmelCase__)) self.assertTrue(np.allclose(lowerCAmelCase__, lowerCAmelCase__)) self.assertTrue(np.allclose(lowerCAmelCase__, lowerCAmelCase__))
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCamelCase = { '''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''], '''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ['''BertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BertForMaskedLM''', '''BertForMultipleChoice''', '''BertForNextSentencePrediction''', '''BertForPreTraining''', '''BertForQuestionAnswering''', '''BertForSequenceClassification''', '''BertForTokenClassification''', '''BertLayer''', '''BertLMHeadModel''', '''BertModel''', '''BertPreTrainedModel''', '''load_tf_weights_in_bert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFBertEmbeddings''', '''TFBertForMaskedLM''', '''TFBertForMultipleChoice''', '''TFBertForNextSentencePrediction''', '''TFBertForPreTraining''', '''TFBertForQuestionAnswering''', '''TFBertForSequenceClassification''', '''TFBertForTokenClassification''', '''TFBertLMHeadModel''', '''TFBertMainLayer''', '''TFBertModel''', '''TFBertPreTrainedModel''', ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ['''TFBertTokenizer'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''FlaxBertForCausalLM''', '''FlaxBertForMaskedLM''', '''FlaxBertForMultipleChoice''', '''FlaxBertForNextSentencePrediction''', '''FlaxBertForPreTraining''', '''FlaxBertForQuestionAnswering''', '''FlaxBertForSequenceClassification''', '''FlaxBertForTokenClassification''', '''FlaxBertModel''', '''FlaxBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, 
TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" __UpperCamelCase = range(2, 20 + 1) __UpperCamelCase = [10**k for k in range(ks[-1] + 1)] __UpperCamelCase = {} def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]: snake_case_ = sum(a_i[j] for j in range(UpperCAmelCase , len(UpperCAmelCase ) ) ) snake_case_ = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase ) , UpperCAmelCase ) ) ) snake_case_ , snake_case_ = 0, 0 snake_case_ = n - i snake_case_ = memo.get(UpperCAmelCase ) if sub_memo is not None: snake_case_ = sub_memo.get(UpperCAmelCase ) if jumps is not None and len(UpperCAmelCase ) > 0: # find and make the largest jump without going over snake_case_ = -1 for _k in range(len(UpperCAmelCase ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: snake_case_ = _k break if max_jump >= 0: snake_case_ , snake_case_ , snake_case_ = jumps[max_jump] # since the difference between jumps is cached, add c snake_case_ = diff + c for j in range(min(UpperCAmelCase , len(UpperCAmelCase ) ) ): snake_case_ , snake_case_ = divmod(UpperCAmelCase , 10 ) if new_c > 0: add(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) else: snake_case_ = [] else: snake_case_ = {c: []} snake_case_ = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps snake_case_ , snake_case_ = next_term(UpperCAmelCase , k - 1 , i + dn , UpperCAmelCase ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead snake_case_ , snake_case_ = compute(UpperCAmelCase , UpperCAmelCase , i + dn , UpperCAmelCase ) diff += _diff dn += terms_jumped snake_case_ = sub_memo[c] # keep jumps sorted by # of terms skipped snake_case_ = 0 while j < len(UpperCAmelCase ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(UpperCAmelCase , (diff, dn, k) ) return (diff, dn) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: if i >= n: return 0, i if k > len(UpperCAmelCase ): a_i.extend([0 for _ in range(k - len(UpperCAmelCase ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) snake_case_ = i snake_case_ , snake_case_ , snake_case_ = 0, 0, 0 for j in range(len(UpperCAmelCase ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 snake_case_ = ds_c + ds_b diff += addend snake_case_ = 0 for j in range(UpperCAmelCase ): snake_case_ = a_i[j] + addend snake_case_ , snake_case_ = divmod(UpperCAmelCase , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return diff, i - start_i def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: for j in range(UpperCAmelCase , len(UpperCAmelCase ) ): snake_case_ = digits[j] + addend if s >= 10: snake_case_ , snake_case_ = divmod(UpperCAmelCase , 10 ) snake_case_ = addend // 10 + quotient else: snake_case_ = s snake_case_ = addend // 10 if addend == 0: break while addend > 0: snake_case_ , snake_case_ = divmod(UpperCAmelCase , 10 ) digits.append(UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase = 10**15 ) -> int: snake_case_ = [1] snake_case_ = 1 snake_case_ = 0 while True: snake_case_ , snake_case_ = next_term(UpperCAmelCase , 20 , i + dn , UpperCAmelCase ) dn += terms_jumped if dn == n - i: break snake_case_ = 0 for j in range(len(UpperCAmelCase ) ): a_n += digits[j] * 10**j 
return a_n if __name__ == "__main__": print(F"""{solution() = }""")
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } __UpperCamelCase = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: for attribute in key.split('.' ): snake_case_ = getattr(UpperCAmelCase , UpperCAmelCase ) if weight_type is not None: snake_case_ = getattr(UpperCAmelCase , UpperCAmelCase ).shape else: snake_case_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value elif weight_type == "running_mean": snake_case_ = value elif weight_type == "running_var": snake_case_ = value elif weight_type == "num_batches_tracked": snake_case_ = value elif weight_type == "inv_freq": snake_case_ = value else: snake_case_ = value logger.info(f'{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: snake_case_ = [] snake_case_ = fairseq_model.state_dict() snake_case_ = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): snake_case_ = False if "conv_layers" in name: load_conv_layer( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == 'group' , ) snake_case_ = True else: for key, mapped_key in MAPPING.items(): snake_case_ = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(UpperCAmelCase )[0].split('.' )[-2] snake_case_ = mapped_key.replace('*' , UpperCAmelCase ) if "pos_bias_u" in name: snake_case_ = None elif "pos_bias_v" in name: snake_case_ = None elif "weight_g" in name: snake_case_ = 'weight_g' elif "weight_v" in name: snake_case_ = 'weight_v' elif "bias" in name: snake_case_ = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case_ = 'weight' elif "running_mean" in name: snake_case_ = 'running_mean' elif "inv_freq" in name: snake_case_ = 'inv_freq' elif "running_var" in name: snake_case_ = 'running_var' elif "num_batches_tracked" in name: snake_case_ = 'num_batches_tracked' else: snake_case_ = None set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) continue if not is_used: unused_weights.append(UpperCAmelCase ) logger.warning(f'Unused weights: {unused_weights}' ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: snake_case_ = full_name.split('conv_layers.' )[-1] snake_case_ = name.split('.' ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) snake_case_ = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) snake_case_ = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) snake_case_ = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) snake_case_ = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(UpperCAmelCase ) @torch.no_grad() def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=True ) -> str: if config_path is not None: snake_case_ = WavaVecaConformerConfig.from_pretrained(UpperCAmelCase , hidden_act='swish' ) else: snake_case_ = WavaVecaConformerConfig() if "rope" in checkpoint_path: snake_case_ = 'rotary' if is_finetuned: if dict_path: snake_case_ = Dictionary.load(UpperCAmelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ = target_dict.pad_index snake_case_ = target_dict.bos_index snake_case_ = target_dict.eos_index snake_case_ = len(target_dict.symbols ) snake_case_ = os.path.join(UpperCAmelCase , 'vocab.json' ) if not os.path.isdir(UpperCAmelCase ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCAmelCase ) ) return os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase ) snake_case_ = target_dict.indices # fairseq has the <pad> and <s> switched snake_case_ = 0 snake_case_ = 1 with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(UpperCAmelCase , UpperCAmelCase ) snake_case_ = WavaVecaCTCTokenizer( UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=UpperCAmelCase , ) snake_case_ = True if config.feat_extract_norm == 'layer' else False snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase , ) snake_case_ = WavaVecaProcessor(feature_extractor=UpperCAmelCase , tokenizer=UpperCAmelCase ) processor.save_pretrained(UpperCAmelCase ) snake_case_ = WavaVecaConformerForCTC(UpperCAmelCase ) else: snake_case_ = WavaVecaConformerForPreTraining(UpperCAmelCase ) if is_finetuned: snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: snake_case_ = argparse.Namespace(task='audio_pretraining' ) snake_case_ = fairseq.tasks.setup_task(UpperCAmelCase ) snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCAmelCase ) snake_case_ = model[0].eval() recursively_load_weights(UpperCAmelCase , UpperCAmelCase , not is_finetuned ) hf_wavavec.save_pretrained(UpperCAmelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) __UpperCamelCase = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool: # 1. Validate that path exists between current and next vertices if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool: # Base Case if curr_ind == len(UpperCAmelCase ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(UpperCAmelCase ) ): if valid_connection(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): # Insert current vertex into path as next transition snake_case_ = next_ver # Validate created path if util_hamilton_cycle(UpperCAmelCase , UpperCAmelCase , curr_ind + 1 ): return True # Backtrack snake_case_ = -1 return False def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 0 ) -> list[int]: snake_case_ = [-1] * (len(UpperCAmelCase ) + 1) # initialize start and end of path with starting index snake_case_ = snake_case_ = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(UpperCAmelCase , UpperCAmelCase , 1 ) else []
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase ) -> list: if len(UpperCAmelCase ) <= 1: return [tuple(UpperCAmelCase )] snake_case_ = [] def generate(UpperCAmelCase , UpperCAmelCase ): snake_case_ = [0] * n res.append(tuple(UpperCAmelCase ) ) snake_case_ = 0 while i < n: if c[i] < i: if i % 2 == 0: snake_case_ , snake_case_ = arr[i], arr[0] else: snake_case_ , snake_case_ = arr[i], arr[c[i]] res.append(tuple(UpperCAmelCase ) ) c[i] += 1 snake_case_ = 0 else: snake_case_ = 0 i += 1 generate(len(UpperCAmelCase ) , UpperCAmelCase ) return res if __name__ == "__main__": __UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip() __UpperCamelCase = [int(item) for item in user_input.split(''',''')] print(heaps(arr))
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase = 100 ) -> int: snake_case_ = (n * (n + 1) // 2) ** 2 snake_case_ = n * (n + 1) * (2 * n + 1) // 6 return sum_cubes - sum_squares if __name__ == "__main__": print(F"""{solution() = }""")
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer __UpperCamelCase = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast __UpperCamelCase = TaTokenizerFast __UpperCamelCase = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''MT5EncoderModel''', '''MT5ForConditionalGeneration''', '''MT5ForQuestionAnswering''', '''MT5Model''', '''MT5PreTrainedModel''', '''MT5Stack''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model'''] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys __UpperCamelCase = _LazyModule( __name__, globals()['''__file__'''], _import_structure, extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast}, module_spec=__spec__, )
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase ) -> int: snake_case_ = [[0 for _ in range(UpperCAmelCase )] for _ in range(m + 1 )] for i in range(m + 1 ): snake_case_ = 1 for n in range(m + 1 ): for k in range(1 , UpperCAmelCase ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: __UpperCamelCase = int(input('''Enter a number: ''').strip()) print(partition(n)) except ValueError: print('''Please enter a number.''') else: try: __UpperCamelCase = int(sys.argv[1]) print(partition(n)) except ValueError: print('''Please pass a number.''')
"""simple docstring""" import sys from collections import defaultdict class UpperCamelCase : def __init__( self) -> Optional[int]: snake_case_ = [] def a_ ( self, lowerCAmelCase__) -> Any: return self.node_position[vertex] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Dict: snake_case_ = pos def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> str: if start > size // 2 - 1: return else: if 2 * start + 2 >= size: snake_case_ = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: snake_case_ = 2 * start + 1 else: snake_case_ = 2 * start + 2 if heap[smallest_child] < heap[start]: snake_case_ , snake_case_ = heap[smallest_child], positions[smallest_child] snake_case_ , snake_case_ = ( heap[start], positions[start], ) snake_case_ , snake_case_ = temp, tempa snake_case_ = self.get_position(positions[smallest_child]) self.set_position( positions[smallest_child], self.get_position(positions[start])) self.set_position(positions[start], lowerCAmelCase__) self.top_to_bottom(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> List[str]: snake_case_ = position[index] while index != 0: snake_case_ = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: snake_case_ = heap[parent] snake_case_ = position[parent] self.set_position(position[parent], lowerCAmelCase__) else: snake_case_ = val snake_case_ = temp self.set_position(lowerCAmelCase__, lowerCAmelCase__) break snake_case_ = parent else: snake_case_ = val snake_case_ = temp self.set_position(lowerCAmelCase__, 0) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Dict: snake_case_ = len(lowerCAmelCase__) // 2 - 1 for i in range(lowerCAmelCase__, -1, -1): self.top_to_bottom(lowerCAmelCase__, lowerCAmelCase__, len(lowerCAmelCase__), lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Union[str, Any]: snake_case_ = positions[0] snake_case_ = sys.maxsize self.top_to_bottom(lowerCAmelCase__, 0, len(lowerCAmelCase__), lowerCAmelCase__) return temp def UpperCAmelCase ( UpperCAmelCase ) -> Tuple: snake_case_ = Heap() snake_case_ = [0] * len(UpperCAmelCase ) snake_case_ = [-1] * len(UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph snake_case_ = [] # Heap of Distance of vertices from their neighboring vertex snake_case_ = [] for vertex in range(len(UpperCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(UpperCAmelCase ) heap.node_position.append(UpperCAmelCase ) snake_case_ = [] snake_case_ = 1 snake_case_ = sys.maxsize for neighbor, distance in adjacency_list[0]: snake_case_ = 0 snake_case_ = distance heap.heapify(UpperCAmelCase , UpperCAmelCase ) for _ in range(1 , len(UpperCAmelCase ) ): snake_case_ = heap.delete_minimum(UpperCAmelCase , UpperCAmelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) snake_case_ = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(UpperCAmelCase )] ): snake_case_ = distance heap.bottom_to_top( UpperCAmelCase , heap.get_position(UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase ) snake_case_ = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > __UpperCamelCase = int(input('''Enter number of edges: ''').strip()) __UpperCamelCase 
= defaultdict(list) for _ in range(edges_number): __UpperCamelCase = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
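# Illustrative example (assumes the prisms_algorithm defined above): a
# four-vertex graph whose minimum spanning tree is the path 0-1-2-3.
_demo_graph = defaultdict(list)
for _u, _v, _w in [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 10)]:
    _demo_graph[_u].append([_v, _w])
    _demo_graph[_v].append([_u, _w])
print(prisms_algorithm(_demo_graph))  # [(0, 1), (1, 2), (2, 3)]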
"""simple docstring""" import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def UpperCAmelCase ( UpperCAmelCase ) -> Dict: snake_case_ = [] embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight', f'stage{idx}.patch_embed.proj.weight', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias', f'stage{idx}.patch_embed.proj.bias', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight', f'stage{idx}.patch_embed.norm.weight', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias', f'stage{idx}.patch_embed.norm.bias', ) ) return embed def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: snake_case_ = [] attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var', ) ) attention_weights.append( ( 
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight', f'stage{idx}.blocks.{cnt}.attn.proj_q.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias', f'stage{idx}.blocks.{cnt}.attn.proj_q.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight', f'stage{idx}.blocks.{cnt}.attn.proj_k.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias', f'stage{idx}.blocks.{cnt}.attn.proj_k.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight', f'stage{idx}.blocks.{cnt}.attn.proj_v.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias', f'stage{idx}.blocks.{cnt}.attn.proj_v.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight', f'stage{idx}.blocks.{cnt}.attn.proj.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias', f'stage{idx}.blocks.{cnt}.attn.proj.bias', ) ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') ) 
attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') ) return attention_weights def UpperCAmelCase ( UpperCAmelCase ) -> Dict: snake_case_ = [] token.append((f'cvt.encoder.stages.{idx}.cls_token', 'stage2.cls_token') ) return token def UpperCAmelCase ( ) -> Optional[Any]: snake_case_ = [] head.append(('layernorm.weight', 'norm.weight') ) head.append(('layernorm.bias', 'norm.bias') ) head.append(('classifier.weight', 'head.weight') ) head.append(('classifier.bias', 'head.bias') ) return head def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: snake_case_ = 'imagenet-1k-id2label.json' snake_case_ = 1000 snake_case_ = 'huggingface/label-files' snake_case_ = num_labels snake_case_ = json.load(open(cached_download(hf_hub_url(UpperCAmelCase , UpperCAmelCase , repo_type='dataset' ) ) , 'r' ) ) snake_case_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = snake_case_ = CvtConfig(num_labels=UpperCAmelCase , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13": snake_case_ = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21": snake_case_ = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: snake_case_ = [2, 2, 20] snake_case_ = [3, 12, 16] snake_case_ = [192, 768, 1024] snake_case_ = CvtForImageClassification(UpperCAmelCase ) snake_case_ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) snake_case_ = image_size snake_case_ = torch.load(UpperCAmelCase , map_location=torch.device('cpu' ) ) snake_case_ = OrderedDict() snake_case_ = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: snake_case_ = list_of_state_dict + cls_token(UpperCAmelCase ) snake_case_ = list_of_state_dict + embeddings(UpperCAmelCase ) for cnt in range(config.depth[idx] ): snake_case_ = list_of_state_dict + attention(UpperCAmelCase , UpperCAmelCase ) snake_case_ = list_of_state_dict + final() for gg in list_of_state_dict: print(UpperCAmelCase ) for i in range(len(UpperCAmelCase ) ): snake_case_ = original_weights[list_of_state_dict[i][1]] model.load_state_dict(UpperCAmelCase ) model.save_pretrained(UpperCAmelCase ) image_processor.save_pretrained(UpperCAmelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=384, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Input Image Size''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) __UpperCamelCase = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, 
args.pytorch_dump_folder_path)
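# Illustrative doctest for the rename helpers above: the first pair produced
# for stage 0 maps the HF patch-embedding projection onto the original key.
# >>> embeddings(0)[0]
# ('cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight',
#  'stage0.patch_embed.proj.weight')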
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class UpperCamelCase ( lowerCAmelCase__ ): @require_torch def a_ ( self) -> Tuple: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched snake_case_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n ' snake_case_ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n ' snake_case_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n ' # Force fetching the files so that we can use the cache snake_case_ = 'hf-internal-testing/tiny-random-bert' BertConfig.from_pretrained(lowerCAmelCase__) BertModel.from_pretrained(lowerCAmelCase__) BertTokenizer.from_pretrained(lowerCAmelCase__) pipeline(task='fill-mask', model=lowerCAmelCase__) # baseline - just load from_pretrained with normal network snake_case_ = [sys.executable, '-c', '\n'.join([load, run, mock])] # should succeed snake_case_ = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files snake_case_ = '1' snake_case_ = subprocess.run(lowerCAmelCase__, env=lowerCAmelCase__, check=lowerCAmelCase__, capture_output=lowerCAmelCase__) self.assertEqual(result.returncode, 0, result.stderr) self.assertIn('success', result.stdout.decode()) @require_torch def a_ ( self) -> Tuple: # python one-liner segments # this must be loaded before socket.socket is monkey-patched snake_case_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n ' snake_case_ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n ' snake_case_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n ' # Force fetching the files so that we can use the cache snake_case_ = 'hf-internal-testing/tiny-random-bert' BertConfig.from_pretrained(lowerCAmelCase__) BertModel.from_pretrained(lowerCAmelCase__) BertTokenizer.from_pretrained(lowerCAmelCase__) pipeline(task='fill-mask', model=lowerCAmelCase__) # baseline - just load from_pretrained with normal network snake_case_ = [sys.executable, '-c', '\n'.join([load, run, mock])] # should succeed snake_case_ = self.get_env() snake_case_ = subprocess.run(lowerCAmelCase__, env=lowerCAmelCase__, check=lowerCAmelCase__, capture_output=lowerCAmelCase__) self.assertEqual(result.returncode, 0, result.stderr) self.assertIn('success', result.stdout.decode()) @require_torch def a_ ( self) -> Union[str, Any]: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched snake_case_ = '\nfrom 
transformers import BertConfig, BertModel, BertTokenizer\n ' snake_case_ = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n ' snake_case_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n ' # baseline - just load from_pretrained with normal network snake_case_ = [sys.executable, '-c', '\n'.join([load, run])] # should succeed snake_case_ = self.get_env() snake_case_ = subprocess.run(lowerCAmelCase__, env=lowerCAmelCase__, check=lowerCAmelCase__, capture_output=lowerCAmelCase__) self.assertEqual(result.returncode, 0, result.stderr) self.assertIn('success', result.stdout.decode()) # next emulate no network snake_case_ = [sys.executable, '-c', '\n'.join([load, mock, run])] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files snake_case_ = '1' snake_case_ = subprocess.run(lowerCAmelCase__, env=lowerCAmelCase__, check=lowerCAmelCase__, capture_output=lowerCAmelCase__) self.assertEqual(result.returncode, 0, result.stderr) self.assertIn('success', result.stdout.decode()) @require_torch def a_ ( self) -> Optional[int]: snake_case_ = '\nfrom transformers import pipeline\n ' snake_case_ = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n ' snake_case_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n ' snake_case_ = self.get_env() snake_case_ = '1' snake_case_ = [sys.executable, '-c', '\n'.join([load, mock, run])] snake_case_ = subprocess.run(lowerCAmelCase__, env=lowerCAmelCase__, check=lowerCAmelCase__, capture_output=lowerCAmelCase__) self.assertEqual(result.returncode, 1, result.stderr) self.assertIn( 'You cannot infer task automatically within `pipeline` when using offline mode', result.stderr.decode().replace('\n', ''), ) @require_torch def a_ ( self) -> List[str]: snake_case_ = '\nfrom transformers import AutoModel\n ' snake_case_ = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n ' # baseline - just load from_pretrained with normal network snake_case_ = [sys.executable, '-c', '\n'.join([load, run])] # should succeed snake_case_ = self.get_env() snake_case_ = subprocess.run(lowerCAmelCase__, env=lowerCAmelCase__, check=lowerCAmelCase__, capture_output=lowerCAmelCase__) self.assertEqual(result.returncode, 0, result.stderr) self.assertIn('success', result.stdout.decode()) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files snake_case_ = '1' snake_case_ = subprocess.run(lowerCAmelCase__, env=lowerCAmelCase__, check=lowerCAmelCase__, capture_output=lowerCAmelCase__) self.assertEqual(result.returncode, 0, result.stderr) self.assertIn('success', result.stdout.decode())
"""simple docstring""" from math import factorial def UpperCAmelCase ( UpperCAmelCase = 20 ) -> int: snake_case_ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... snake_case_ = n // 2 return int(factorial(UpperCAmelCase ) / (factorial(UpperCAmelCase ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(20)) else: try: __UpperCamelCase = int(sys.argv[1]) print(solution(n)) except ValueError: print('''Invalid entry - please enter a number.''')
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast __UpperCamelCase = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase ( datasets.BuilderConfig ): SCREAMING_SNAKE_CASE_ = 1_0_0_0_0 SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None class UpperCamelCase ( datasets.ArrowBasedBuilder ): SCREAMING_SNAKE_CASE_ = ParquetConfig def a_ ( self) -> Tuple: return datasets.DatasetInfo(features=self.config.features) def a_ ( self, lowerCAmelCase__) -> str: if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}') snake_case_ = dl_manager.download_and_extract(self.config.data_files) if isinstance(lowerCAmelCase__, (str, list, tuple)): snake_case_ = data_files if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive snake_case_ = [dl_manager.iter_files(lowerCAmelCase__) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})] snake_case_ = [] for split_name, files in data_files.items(): if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive snake_case_ = [dl_manager.iter_files(lowerCAmelCase__) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(lowerCAmelCase__): with open(lowerCAmelCase__, 'rb') as f: snake_case_ = datasets.Features.from_arrow_schema(pq.read_schema(lowerCAmelCase__)) break splits.append(datasets.SplitGenerator(name=lowerCAmelCase__, gen_kwargs={'files': files})) return splits def a_ ( self, lowerCAmelCase__) -> pa.Table: if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example snake_case_ = table_cast(lowerCAmelCase__, self.info.features.arrow_schema) return pa_table def a_ ( self, lowerCAmelCase__) -> Tuple: snake_case_ = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema) != sorted(self.config.columns): raise ValueError( f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'') for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__)): with open(lowerCAmelCase__, 'rb') as f: snake_case_ = pq.ParquetFile(lowerCAmelCase__) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)): snake_case_ = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'{file_idx}_{batch_idx}', self._cast_table(lowerCAmelCase__) except ValueError as e: logger.error(f'Failed to read file \'{file}\' with error {type(lowerCAmelCase__)}: {e}') raise
"""simple docstring""" import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int: snake_case_ = nn.functional.normalize(UpperCAmelCase ) snake_case_ = nn.functional.normalize(UpperCAmelCase ) return torch.mm(UpperCAmelCase , normalized_text_embeds.t() ) class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = CLIPConfig SCREAMING_SNAKE_CASE_ = ["CLIPEncoderLayer"] def __init__( self, lowerCAmelCase__) -> Optional[int]: super().__init__(lowerCAmelCase__) snake_case_ = CLIPVisionModel(config.vision_config) snake_case_ = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=lowerCAmelCase__) snake_case_ = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=lowerCAmelCase__) snake_case_ = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=lowerCAmelCase__) snake_case_ = nn.Parameter(torch.ones(17), requires_grad=lowerCAmelCase__) snake_case_ = nn.Parameter(torch.ones(3), requires_grad=lowerCAmelCase__) @torch.no_grad() def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Tuple: snake_case_ = self.vision_model(lowerCAmelCase__)[1] # pooled_output snake_case_ = self.visual_projection(lowerCAmelCase__) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 snake_case_ = cosine_distance(lowerCAmelCase__, self.special_care_embeds).cpu().float().numpy() snake_case_ = cosine_distance(lowerCAmelCase__, self.concept_embeds).cpu().float().numpy() snake_case_ = [] snake_case_ = image_embeds.shape[0] for i in range(lowerCAmelCase__): snake_case_ = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images snake_case_ = 0.0 for concept_idx in range(len(special_cos_dist[0])): snake_case_ = special_cos_dist[i][concept_idx] snake_case_ = self.special_care_embeds_weights[concept_idx].item() snake_case_ = round(concept_cos - concept_threshold + adjustment, 3) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]}) snake_case_ = 0.01 for concept_idx in range(len(cos_dist[0])): snake_case_ = cos_dist[i][concept_idx] snake_case_ = self.concept_embeds_weights[concept_idx].item() snake_case_ = round(concept_cos - concept_threshold + adjustment, 3) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(lowerCAmelCase__) result.append(lowerCAmelCase__) snake_case_ = [len(res['bad_concepts']) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Optional[int]: snake_case_ = self.vision_model(lowerCAmelCase__)[1] # pooled_output snake_case_ = self.visual_projection(lowerCAmelCase__) snake_case_ = cosine_distance(lowerCAmelCase__, self.special_care_embeds) snake_case_ = cosine_distance(lowerCAmelCase__, self.concept_embeds) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images snake_case_ = 0.0 snake_case_ = special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) snake_case_ = torch.any(special_scores > 0, dim=1) snake_case_ = special_care * 0.01 snake_case_ = 
special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1]) snake_case_ = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) snake_case_ = torch.any(concept_scores > 0, dim=1) return images, has_nsfw_concepts
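# Illustrative check of the cosine_distance helper above (random embeddings;
# the entries lie in [-1, 1] because both inputs are L2-normalized before the
# matrix product):
if __name__ == "__main__":
    image_embeds = torch.randn(2, 8)
    text_embeds = torch.randn(3, 8)
    assert cosine_distance(image_embeds, text_embeds).shape == (2, 3)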
"""simple docstring""" from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["vqvae"] def __init__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> Optional[Any]: super().__init__() self.register_modules(unet=lowerCAmelCase__, scheduler=lowerCAmelCase__, mel=lowerCAmelCase__, vqvae=lowerCAmelCase__) def a_ ( self) -> int: return 50 if isinstance(self.scheduler, lowerCAmelCase__) else 1000 @torch.no_grad() def __call__( self, lowerCAmelCase__ = 1, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = 0, lowerCAmelCase__ = 0, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = 0, lowerCAmelCase__ = 0, lowerCAmelCase__ = None, lowerCAmelCase__ = 0, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__=True, ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: snake_case_ = steps or self.get_default_steps() self.scheduler.set_timesteps(lowerCAmelCase__) snake_case_ = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size) == int: snake_case_ = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: snake_case_ = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ), generator=lowerCAmelCase__, device=self.device, ) snake_case_ = noise snake_case_ = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = self.mel.audio_slice_to_image(lowerCAmelCase__) snake_case_ = np.frombuffer(input_image.tobytes(), dtype='uint8').reshape( (input_image.height, input_image.width)) snake_case_ = (input_image / 255) * 2 - 1 snake_case_ = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device) if self.vqvae is not None: snake_case_ = self.vqvae.encode(torch.unsqueeze(lowerCAmelCase__, 0)).latent_dist.sample( generator=lowerCAmelCase__)[0] snake_case_ = self.vqvae.config.scaling_factor * input_images if start_step > 0: snake_case_ = self.scheduler.add_noise(lowerCAmelCase__, lowerCAmelCase__, self.scheduler.timesteps[start_step - 1]) snake_case_ = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) snake_case_ = int(mask_start_secs * pixels_per_second) snake_case_ = int(mask_end_secs * pixels_per_second) snake_case_ = self.scheduler.add_noise(lowerCAmelCase__, lowerCAmelCase__, torch.tensor(self.scheduler.timesteps[start_step:])) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])): if isinstance(self.unet, lowerCAmelCase__): snake_case_ = self.unet(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)['sample'] else: snake_case_ = self.unet(lowerCAmelCase__, lowerCAmelCase__)['sample'] if isinstance(self.scheduler, lowerCAmelCase__): snake_case_ = self.scheduler.step( model_output=lowerCAmelCase__, timestep=lowerCAmelCase__, sample=lowerCAmelCase__, eta=lowerCAmelCase__, generator=lowerCAmelCase__, )['prev_sample'] else: snake_case_ = self.scheduler.step( 
model_output=lowerCAmelCase__, timestep=lowerCAmelCase__, sample=lowerCAmelCase__, generator=lowerCAmelCase__, )['prev_sample'] if mask is not None: if mask_start > 0: snake_case_ = mask[:, step, :, :mask_start] if mask_end > 0: snake_case_ = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance snake_case_ = 1 / self.vqvae.config.scaling_factor * images snake_case_ = self.vqvae.decode(lowerCAmelCase__)['sample'] snake_case_ = (images / 2 + 0.5).clamp(0, 1) snake_case_ = images.cpu().permute(0, 2, 3, 1).numpy() snake_case_ = (images * 255).round().astype('uint8') snake_case_ = list( (Image.fromarray(_[:, :, 0]) for _ in images) if images.shape[3] == 1 else (Image.fromarray(lowerCAmelCase__, mode='RGB').convert('L') for _ in images)) snake_case_ = [self.mel.image_to_audio(lowerCAmelCase__) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(lowerCAmelCase__)[:, np.newaxis, :]), **ImagePipelineOutput(lowerCAmelCase__)) @torch.no_grad() def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = 50) -> np.ndarray: assert isinstance(self.scheduler, lowerCAmelCase__) self.scheduler.set_timesteps(lowerCAmelCase__) snake_case_ = np.array( [np.frombuffer(image.tobytes(), dtype='uint8').reshape((1, image.height, image.width)) for image in images]) snake_case_ = (sample / 255) * 2 - 1 snake_case_ = torch.Tensor(lowerCAmelCase__).to(self.device) for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))): snake_case_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps snake_case_ = self.scheduler.alphas_cumprod[t] snake_case_ = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) snake_case_ = 1 - alpha_prod_t snake_case_ = self.unet(lowerCAmelCase__, lowerCAmelCase__)['sample'] snake_case_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output snake_case_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) snake_case_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> torch.Tensor: snake_case_ = acos(torch.dot(torch.flatten(lowerCAmelCase__), torch.flatten(lowerCAmelCase__)) / torch.norm(lowerCAmelCase__) / torch.norm(lowerCAmelCase__)) return sin((1 - alpha) * theta) * xa / sin(lowerCAmelCase__) + sin(alpha * theta) * xa / sin(lowerCAmelCase__)
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = (DPMSolverSinglestepScheduler,) SCREAMING_SNAKE_CASE_ = (("num_inference_steps", 2_5),) def a_ ( self, **lowerCAmelCase__) -> int: snake_case_ = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, 'prediction_type': 'epsilon', 'thresholding': False, 'sample_max_value': 1.0, 'algorithm_type': 'dpmsolver++', 'solver_type': 'midpoint', 'lambda_min_clipped': -float('inf'), 'variance_type': None, } config.update(**lowerCAmelCase__) return config def a_ ( self, lowerCAmelCase__=0, **lowerCAmelCase__) -> List[Any]: snake_case_ = dict(self.forward_default_kwargs) snake_case_ = kwargs.pop('num_inference_steps', lowerCAmelCase__) snake_case_ = self.dummy_sample snake_case_ = 0.1 * sample snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: snake_case_ = self.get_scheduler_config(**lowerCAmelCase__) snake_case_ = scheduler_class(**lowerCAmelCase__) scheduler.set_timesteps(lowerCAmelCase__) # copy over dummy past residuals snake_case_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCAmelCase__) snake_case_ = scheduler_class.from_pretrained(lowerCAmelCase__) new_scheduler.set_timesteps(lowerCAmelCase__) # copy over dummy past residuals snake_case_ = dummy_past_residuals[: new_scheduler.config.solver_order] snake_case_ , snake_case_ = sample, sample for t in range(lowerCAmelCase__, time_step + scheduler.config.solver_order + 1): snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample snake_case_ = new_scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def a_ ( self) -> Union[str, Any]: pass def a_ ( self, lowerCAmelCase__=0, **lowerCAmelCase__) -> int: snake_case_ = dict(self.forward_default_kwargs) snake_case_ = kwargs.pop('num_inference_steps', lowerCAmelCase__) snake_case_ = self.dummy_sample snake_case_ = 0.1 * sample snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: snake_case_ = self.get_scheduler_config() snake_case_ = scheduler_class(**lowerCAmelCase__) scheduler.set_timesteps(lowerCAmelCase__) # copy over dummy past residuals (must be after setting timesteps) snake_case_ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCAmelCase__) snake_case_ = scheduler_class.from_pretrained(lowerCAmelCase__) # copy over dummy past residuals new_scheduler.set_timesteps(lowerCAmelCase__) # copy over dummy past residual (must be after setting timesteps) snake_case_ = dummy_past_residuals[: new_scheduler.config.solver_order] snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample snake_case_ = new_scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def a_ ( self, 
lowerCAmelCase__=None, **lowerCAmelCase__) -> Union[str, Any]: if scheduler is None: snake_case_ = self.scheduler_classes[0] snake_case_ = self.get_scheduler_config(**lowerCAmelCase__) snake_case_ = scheduler_class(**lowerCAmelCase__) snake_case_ = self.scheduler_classes[0] snake_case_ = self.get_scheduler_config(**lowerCAmelCase__) snake_case_ = scheduler_class(**lowerCAmelCase__) snake_case_ = 10 snake_case_ = self.dummy_model() snake_case_ = self.dummy_sample_deter scheduler.set_timesteps(lowerCAmelCase__) for i, t in enumerate(scheduler.timesteps): snake_case_ = model(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample return sample def a_ ( self) -> List[Any]: snake_case_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) snake_case_ = 50 snake_case_ = self.dummy_model() snake_case_ = self.dummy_sample_deter scheduler.set_timesteps(lowerCAmelCase__) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:]): snake_case_ = model(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.2574) < 1e-3 def a_ ( self) -> Dict: for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=lowerCAmelCase__) def a_ ( self) -> Optional[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults snake_case_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) snake_case_ = self.full_loop(scheduler=lowerCAmelCase__) snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.2791) < 1e-3 snake_case_ = DEISMultistepScheduler.from_config(scheduler.config) snake_case_ = DPMSolverMultistepScheduler.from_config(scheduler.config) snake_case_ = UniPCMultistepScheduler.from_config(scheduler.config) snake_case_ = DPMSolverSinglestepScheduler.from_config(scheduler.config) snake_case_ = self.full_loop(scheduler=lowerCAmelCase__) snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.2791) < 1e-3 def a_ ( self) -> str: self.check_over_configs(thresholding=lowerCAmelCase__) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowerCAmelCase__, prediction_type=lowerCAmelCase__, sample_max_value=lowerCAmelCase__, algorithm_type='dpmsolver++', solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, ) def a_ ( self) -> Tuple: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase__) def a_ ( self) -> Optional[int]: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, prediction_type=lowerCAmelCase__, algorithm_type=lowerCAmelCase__, ) snake_case_ = self.full_loop( solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, prediction_type=lowerCAmelCase__, algorithm_type=lowerCAmelCase__, ) assert not torch.isnan(lowerCAmelCase__).any(), "Samples have nan numbers" def a_ ( self) -> Optional[Any]: self.check_over_configs(lower_order_final=lowerCAmelCase__) self.check_over_configs(lower_order_final=lowerCAmelCase__) def a_ ( 
self) -> Any: self.check_over_configs(lambda_min_clipped=-float('inf')) self.check_over_configs(lambda_min_clipped=-5.1) def a_ ( self) -> Any: self.check_over_configs(variance_type=lowerCAmelCase__) self.check_over_configs(variance_type='learned_range') def a_ ( self) -> List[Any]: for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=lowerCAmelCase__, time_step=0) def a_ ( self) -> int: snake_case_ = self.full_loop() snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.2791) < 1e-3 def a_ ( self) -> Dict: snake_case_ = self.full_loop(use_karras_sigmas=lowerCAmelCase__) snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.2248) < 1e-3 def a_ ( self) -> Union[str, Any]: snake_case_ = self.full_loop(prediction_type='v_prediction') snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.1453) < 1e-3 def a_ ( self) -> Optional[Any]: snake_case_ = self.full_loop(prediction_type='v_prediction', use_karras_sigmas=lowerCAmelCase__) snake_case_ = torch.mean(torch.abs(lowerCAmelCase__)) assert abs(result_mean.item() - 0.0649) < 1e-3 def a_ ( self) -> Optional[int]: snake_case_ = self.scheduler_classes[0] snake_case_ = self.get_scheduler_config(thresholding=lowerCAmelCase__, dynamic_thresholding_ratio=0) snake_case_ = scheduler_class(**lowerCAmelCase__) snake_case_ = 10 snake_case_ = self.dummy_model() snake_case_ = self.dummy_sample_deter.half() scheduler.set_timesteps(lowerCAmelCase__) for i, t in enumerate(scheduler.timesteps): snake_case_ = model(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample assert sample.dtype == torch.floataa
"""simple docstring""" from math import sqrt def UpperCAmelCase ( UpperCAmelCase = 1000000 ) -> int: snake_case_ = 0 snake_case_ = 0 snake_case_ = 42 while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(UpperCAmelCase , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"""{solution() = }""")
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool: # 1. Validate that path exists between current and next vertices if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool: # Base Case if curr_ind == len(UpperCAmelCase ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(UpperCAmelCase ) ): if valid_connection(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): # Insert current vertex into path as next transition snake_case_ = next_ver # Validate created path if util_hamilton_cycle(UpperCAmelCase , UpperCAmelCase , curr_ind + 1 ): return True # Backtrack snake_case_ = -1 return False def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 0 ) -> list[int]: snake_case_ = [-1] * (len(UpperCAmelCase ) + 1) # initialize start and end of path with starting index snake_case_ = snake_case_ = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(UpperCAmelCase , UpperCAmelCase , 1 ) else []
69
1
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class UpperCamelCase ( lowerCAmelCase__ ): def __init__( self, lowerCAmelCase__) -> str: snake_case_ = data def __iter__( self) -> List[Any]: for element in self.data: yield element def UpperCAmelCase ( UpperCAmelCase=True ) -> int: snake_case_ = Accelerator(even_batches=UpperCAmelCase ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False ) -> List[str]: if iterable: snake_case_ = DummyIterableDataset(torch.as_tensor(range(UpperCAmelCase ) ) ) else: snake_case_ = TensorDataset(torch.as_tensor(range(UpperCAmelCase ) ) ) snake_case_ = DataLoader(UpperCAmelCase , batch_size=UpperCAmelCase ) snake_case_ = accelerator.prepare(UpperCAmelCase ) return dl def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Union[str, Any]: snake_case_ = create_dataloader(accelerator=UpperCAmelCase , dataset_size=UpperCAmelCase , batch_size=UpperCAmelCase ) snake_case_ = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def UpperCAmelCase ( ) -> Tuple: snake_case_ = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( UpperCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( UpperCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def UpperCAmelCase ( ) -> str: snake_case_ = create_accelerator(even_batches=UpperCAmelCase ) verify_dataloader_batch_sizes( UpperCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( UpperCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def UpperCAmelCase ( ) -> int: snake_case_ = create_accelerator(even_batches=UpperCAmelCase ) snake_case_ = torch.nn.Linear(1 , 1 ) snake_case_ = accelerator.prepare(UpperCAmelCase ) snake_case_ = create_dataloader(UpperCAmelCase , dataset_size=3 , batch_size=1 ) snake_case_ = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(UpperCAmelCase ): snake_case_ = ddp_model(batch[0].float() ) 
snake_case_ = output.sum() loss.backward() batch_idxs.append(UpperCAmelCase ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def UpperCAmelCase ( UpperCAmelCase ) -> Any: with warnings.catch_warnings(record=UpperCAmelCase ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , UpperCAmelCase ) assert "only supported for multi-GPU" in str(w[-1].message ) def UpperCAmelCase ( ) -> Optional[int]: snake_case_ = True snake_case_ = False snake_case_ = create_accelerator(even_batches=UpperCAmelCase ) snake_case_ = torch.nn.Linear(1 , 1 ) snake_case_ = accelerator.prepare(UpperCAmelCase ) snake_case_ = create_dataloader(UpperCAmelCase , dataset_size=3 , batch_size=1 ) snake_case_ = create_dataloader(UpperCAmelCase , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCAmelCase ): snake_case_ = train_dl.batch_sampler.even_batches snake_case_ = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def UpperCAmelCase ( ) -> int: snake_case_ = True snake_case_ = False snake_case_ = create_accelerator(even_batches=UpperCAmelCase ) snake_case_ = torch.nn.Linear(1 , 1 ) snake_case_ = accelerator.prepare(UpperCAmelCase ) create_dataloader(UpperCAmelCase , dataset_size=3 , batch_size=1 , iterable=UpperCAmelCase ) snake_case_ = create_dataloader(UpperCAmelCase , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings('ignore' ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCAmelCase ): snake_case_ = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def UpperCAmelCase ( ) -> Any: snake_case_ = create_accelerator() snake_case_ = torch.nn.Linear(1 , 1 ) snake_case_ = accelerator.prepare(UpperCAmelCase ) create_dataloader(UpperCAmelCase , dataset_size=3 , batch_size=1 , iterable=UpperCAmelCase ) with warnings.catch_warnings(record=UpperCAmelCase ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCAmelCase ): pass assert issubclass(w[-1].category , UpperCAmelCase ) assert "only supported for map-style datasets" in str(w[-1].message ) def UpperCAmelCase ( ) -> Optional[int]: snake_case_ = create_accelerator() accelerator.print('Test that even_batches variable ensures uniform batches across processes' ) test_default_ensures_even_batch_sizes() accelerator.print('Run tests with even_batches disabled' ) test_can_disable_even_batches() accelerator.print('Test joining uneven inputs' ) test_can_join_uneven_inputs() accelerator.print('Test overriding even_batches when joining uneven inputs' ) test_join_can_override_even_batches() accelerator.print('Test overriding even_batches for mixed dataloader types' ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print('Test join with non DDP distributed raises warning' ) 
snake_case_ = accelerator.state.distributed_type snake_case_ = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(UpperCAmelCase ) snake_case_ = original_state if __name__ == "__main__": main()
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = '''▁''' __UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''} __UpperCamelCase = { '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model''' ), } } __UpperCamelCase = { '''facebook/nllb-200-distilled-600M''': 1024, } # fmt: off __UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', 
'''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] def __init__( self, lowerCAmelCase__, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__ = None, lowerCAmelCase__=None, lowerCAmelCase__=False, **lowerCAmelCase__, ) -> Union[str, Any]: # Mask token behave like a normal word, i.e. include the space before it snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs snake_case_ = legacy_behaviour super().__init__( bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, src_lang=lowerCAmelCase__, tgt_lang=lowerCAmelCase__, additional_special_tokens=lowerCAmelCase__, sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(lowerCAmelCase__)) snake_case_ = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token snake_case_ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab snake_case_ = 1 snake_case_ = len(self.sp_model) snake_case_ = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase__) } snake_case_ = {v: k for k, v in self.lang_code_to_id.items()} snake_case_ = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id) snake_case_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} snake_case_ = list(self.lang_code_to_id.keys()) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens]) snake_case_ = src_lang if src_lang is not None else 'eng_Latn' snake_case_ = self.lang_code_to_id[self._src_lang] snake_case_ = tgt_lang self.set_src_lang_special_tokens(self._src_lang) def __getstate__( self) -> Union[str, Any]: snake_case_ = self.__dict__.copy() snake_case_ = None snake_case_ = self.sp_model.serialized_model_proto() return state def __setstate__( self, lowerCAmelCase__) -> Tuple: snake_case_ = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs'): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) @property def a_ ( self) -> str: return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def a_ ( self) -> str: return self._src_lang @src_lang.setter def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase__, token_ids_a=lowerCAmelCase__, already_has_special_tokens=lowerCAmelCase__) snake_case_ = [1] * len(self.prefix_tokens) snake_case_ = [1] * len(self.suffix_tokens) if token_ids_a is None: return prefix_ones + ([0] * len(lowerCAmelCase__)) + suffix_ones return prefix_ones + ([0] * len(lowerCAmelCase__)) + ([0] * len(lowerCAmelCase__)) + suffix_ones def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> str: if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model') snake_case_ = src_lang snake_case_ = self(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__) snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__) snake_case_ = tgt_lang_id return inputs def a_ ( self) -> List[Any]: snake_case_ = {self.convert_ids_to_tokens(lowerCAmelCase__): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def a_ ( self, lowerCAmelCase__) -> List[str]: return self.sp_model.encode(lowerCAmelCase__, out_type=lowerCAmelCase__) def a_ ( self, lowerCAmelCase__) -> Any: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] snake_case_ = self.sp_model.PieceToId(lowerCAmelCase__) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def a_ ( self, lowerCAmelCase__) -> Dict: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def a_ ( self, lowerCAmelCase__) -> List[str]: snake_case_ 
= ''.join(lowerCAmelCase__).replace(lowerCAmelCase__, ' ').strip() return out_string def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase__): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return snake_case_ = os.path.join( lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, lowerCAmelCase__) elif not os.path.isfile(self.vocab_file): with open(lowerCAmelCase__, 'wb') as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase__) return (out_vocab_file,) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = "eng_Latn", lowerCAmelCase__ = None, lowerCAmelCase__ = "fra_Latn", **lowerCAmelCase__, ) -> BatchEncoding: snake_case_ = src_lang snake_case_ = tgt_lang return super().prepare_seqaseq_batch(lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self) -> Union[str, Any]: return self.set_src_lang_special_tokens(self.src_lang) def a_ ( self) -> int: return self.set_tgt_lang_special_tokens(self.tgt_lang) def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = self.lang_code_to_id[src_lang] if self.legacy_behaviour: snake_case_ = [] snake_case_ = [self.eos_token_id, self.cur_lang_code] else: snake_case_ = [self.cur_lang_code] snake_case_ = [self.eos_token_id] def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = self.lang_code_to_id[lang] if self.legacy_behaviour: snake_case_ = [] snake_case_ = [self.eos_token_id, self.cur_lang_code] else: snake_case_ = [self.cur_lang_code] snake_case_ = [self.eos_token_id]
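

# Illustrative usage of the tokenizer above (not part of the original module).
# Assumes network access to the "facebook/nllb-200-distilled-600M" checkpoint on
# the Hugging Face Hub; with the default (non-legacy) behaviour, the encoded ids
# start with the source language code and end with </s>.
from transformers import NllbTokenizer

nllb_tokenizer = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
encoded = nllb_tokenizer("Hello world", return_tensors="pt")
print(encoded["input_ids"])  # [[eng_Latn id, ..., </s> id]]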
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __UpperCamelCase = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def UpperCAmelCase ( UpperCAmelCase ) -> Union[str, Any]: from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase ) -> Dict: from diffusers.utils.testing_utils import pytest_terminal_summary_main snake_case_ = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(UpperCAmelCase , id=UpperCAmelCase )
"""simple docstring""" from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def UpperCAmelCase ( ) -> int: snake_case_ = HfArgumentParser(UpperCAmelCase ) snake_case_ = parser.parse_args_into_dataclasses()[0] snake_case_ = TensorFlowBenchmark(args=UpperCAmelCase ) try: snake_case_ = parser.parse_args_into_dataclasses()[0] except ValueError as e: snake_case_ = 'Arg --no_{0} is no longer used, please use --no-{0} instead.' snake_case_ = ' '.join(str(UpperCAmelCase ).split(' ' )[:-1] ) snake_case_ = '' snake_case_ = eval(str(UpperCAmelCase ).split(' ' )[-1] ) snake_case_ = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(UpperCAmelCase ) if len(UpperCAmelCase ) > 0: snake_case_ = full_error_msg + begin_error_msg + str(UpperCAmelCase ) raise ValueError(UpperCAmelCase ) benchmark.run() if __name__ == "__main__": main()
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = False ) -> bool: if n == 2: return True if not n % 2 or n < 2: return False if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit return False if n > 3317044064679887385961981 and not allow_probable: raise ValueError( 'Warning: upper bound of deterministic test is exceeded. ' 'Pass allow_probable=True to allow probabilistic test. ' 'A return value of True indicates a probable prime.' ) # array bounds provided by analysis snake_case_ = [ 2047, 1373653, 25326001, 3215031751, 2152302898747, 3474749660383, 341550071728321, 1, 3825123056546413051, 1, 1, 318665857834031151167461, 3317044064679887385961981, ] snake_case_ = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41] for idx, _p in enumerate(UpperCAmelCase , 1 ): if n < _p: # then we have our last prime to check snake_case_ = primes[:idx] break snake_case_ , snake_case_ = n - 1, 0 # break up n -1 into a power of 2 (s) and # remaining odd component # essentially, solve for d * 2 ** s == n - 1 while d % 2 == 0: d //= 2 s += 1 for prime in plist: snake_case_ = False for r in range(UpperCAmelCase ): snake_case_ = pow(UpperCAmelCase , d * 2**r , UpperCAmelCase ) # see article for analysis explanation for m if (r == 0 and m == 1) or ((m + 1) % n == 0): snake_case_ = True # this loop will not determine compositeness break if pr: continue # if pr is False, then the above loop never evaluated to true, # and the n MUST be composite return False return True def UpperCAmelCase ( ) -> None: assert not miller_rabin(561 ) assert miller_rabin(563 ) # 2047 assert not miller_rabin(838201 ) assert miller_rabin(838207 ) # 1_373_653 assert not miller_rabin(17316001 ) assert miller_rabin(17316017 ) # 25_326_001 assert not miller_rabin(3078386641 ) assert miller_rabin(3078386653 ) # 3_215_031_751 assert not miller_rabin(1713045574801 ) assert miller_rabin(1713045574819 ) # 2_152_302_898_747 assert not miller_rabin(2779799728307 ) assert miller_rabin(2779799728327 ) # 3_474_749_660_383 assert not miller_rabin(113850023909441 ) assert miller_rabin(113850023909527 ) # 341_550_071_728_321 assert not miller_rabin(1275041018848804351 ) assert miller_rabin(1275041018848804391 ) # 3_825_123_056_546_413_051 assert not miller_rabin(79666464458507787791867 ) assert miller_rabin(79666464458507787791951 ) # 318_665_857_834_031_151_167_461 assert not miller_rabin(552840677446647897660333 ) assert miller_rabin(552840677446647897660359 ) # 3_317_044_064_679_887_385_961_981 # upper limit for probabilistic test if __name__ == "__main__": test_miller_rabin()
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( UpperCAmelCase ) -> None: create_state_space_tree(UpperCAmelCase , [] , 0 , [0 for i in range(len(UpperCAmelCase ) )] ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> None: if index == len(UpperCAmelCase ): print(UpperCAmelCase ) return for i in range(len(UpperCAmelCase ) ): if not index_used[i]: current_sequence.append(sequence[i] ) snake_case_ = True create_state_space_tree(UpperCAmelCase , UpperCAmelCase , index + 1 , UpperCAmelCase ) current_sequence.pop() snake_case_ = False __UpperCamelCase = [3, 1, 2, 4] generate_all_permutations(sequence) __UpperCamelCase = ["A", "B", "C"] generate_all_permutations(sequence_a)
"""simple docstring""" import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def UpperCAmelCase ( UpperCAmelCase ) -> tuple: return (data["data"], data["target"]) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> XGBClassifier: snake_case_ = XGBClassifier() classifier.fit(UpperCAmelCase , UpperCAmelCase ) return classifier def UpperCAmelCase ( ) -> None: snake_case_ = load_iris() snake_case_ , snake_case_ = data_handling(UpperCAmelCase ) snake_case_ , snake_case_ , snake_case_ , snake_case_ = train_test_split( UpperCAmelCase , UpperCAmelCase , test_size=0.25 ) snake_case_ = iris['target_names'] # Create an XGBoost Classifier from the training data snake_case_ = xgboost(UpperCAmelCase , UpperCAmelCase ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , display_labels=UpperCAmelCase , cmap='Blues' , normalize='true' , ) plt.title('Normalized Confusion Matrix - IRIS Dataset' ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __UpperCamelCase = { '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ResNetForImageClassification''', '''ResNetModel''', '''ResNetPreTrainedModel''', '''ResNetBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFResNetForImageClassification''', '''TFResNetModel''', '''TFResNetPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''FlaxResNetForImageClassification''', '''FlaxResNetModel''', '''FlaxResNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
"""simple docstring""" import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase : def __init__( self, lowerCAmelCase__, lowerCAmelCase__=13, lowerCAmelCase__=32, lowerCAmelCase__=2, lowerCAmelCase__=3, lowerCAmelCase__=16, lowerCAmelCase__=[32, 64, 128], lowerCAmelCase__=[1, 2, 1], lowerCAmelCase__=[2, 2, 4], lowerCAmelCase__=2, lowerCAmelCase__=2.0, lowerCAmelCase__=True, lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=0.1, lowerCAmelCase__="gelu", lowerCAmelCase__=False, lowerCAmelCase__=True, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-5, lowerCAmelCase__=True, lowerCAmelCase__=None, lowerCAmelCase__=True, lowerCAmelCase__=10, lowerCAmelCase__=8, lowerCAmelCase__=["stage1", "stage2"], lowerCAmelCase__=[1, 2], ) -> Optional[Any]: snake_case_ = parent snake_case_ = batch_size snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = embed_dim snake_case_ = hidden_sizes snake_case_ = depths snake_case_ = num_heads snake_case_ = window_size snake_case_ = mlp_ratio snake_case_ = qkv_bias snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = drop_path_rate snake_case_ = hidden_act snake_case_ = use_absolute_embeddings snake_case_ = patch_norm snake_case_ = layer_norm_eps snake_case_ = initializer_range snake_case_ = is_training snake_case_ = scope snake_case_ = use_labels snake_case_ = type_sequence_label_size snake_case_ = encoder_stride snake_case_ = out_features snake_case_ = out_indices def a_ ( self) -> Dict: snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size], self.type_sequence_label_size) snake_case_ = self.get_config() return config, pixel_values, labels def a_ ( self) -> int: return FocalNetConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> List[Any]: snake_case_ = 
FocalNetModel(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = model(lowerCAmelCase__) snake_case_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) snake_case_ = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim)) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> List[Any]: snake_case_ = FocalNetBackbone(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = model(lowerCAmelCase__) # verify feature maps self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8]) # verify channels self.parent.assertEqual(len(model.channels), len(config.out_features)) self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1]) # verify backbone works with out_features=None snake_case_ = None snake_case_ = FocalNetBackbone(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = model(lowerCAmelCase__) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4]) # verify channels self.parent.assertEqual(len(model.channels), 1) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]]) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> List[str]: snake_case_ = FocalNetForMaskedImageModeling(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = model(lowerCAmelCase__) self.parent.assertEqual( result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images snake_case_ = 1 snake_case_ = FocalNetForMaskedImageModeling(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) snake_case_ = model(lowerCAmelCase__) self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size)) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Optional[int]: snake_case_ = self.type_sequence_label_size snake_case_ = FocalNetForImageClassification(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = model(lowerCAmelCase__, labels=lowerCAmelCase__) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images snake_case_ = 1 snake_case_ = FocalNetForImageClassification(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) snake_case_ = model(lowerCAmelCase__) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def a_ ( self) -> Any: snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ = ( {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification} 
if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False def a_ ( self) -> Any: snake_case_ = FocalNetModelTester(self) snake_case_ = ConfigTester(self, config_class=lowerCAmelCase__, embed_dim=37, has_text_modality=lowerCAmelCase__) def a_ ( self) -> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a_ ( self) -> List[str]: return def a_ ( self) -> Tuple: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__) def a_ ( self) -> int: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCAmelCase__) def a_ ( self) -> List[Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__) def a_ ( self) -> List[Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__) @unittest.skip(reason='FocalNet does not use inputs_embeds') def a_ ( self) -> Optional[Any]: pass @unittest.skip(reason='FocalNet does not use feedforward chunking') def a_ ( self) -> int: pass def a_ ( self) -> List[Any]: snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: snake_case_ = model_class(lowerCAmelCase__) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) snake_case_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase__, nn.Linear)) def a_ ( self) -> str: snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ = [*signature.parameters.keys()] snake_case_ = ['pixel_values'] self.assertListEqual(arg_names[:1], lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Any: snake_case_ = model_class(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() with torch.no_grad(): snake_case_ = model(**self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__)) snake_case_ = outputs.hidden_states snake_case_ = getattr( self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths) + 1) self.assertEqual(len(lowerCAmelCase__), lowerCAmelCase__) # FocalNet has a different seq_length snake_case_ = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) snake_case_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], ) snake_case_ = outputs.reshaped_hidden_states self.assertEqual(len(lowerCAmelCase__), lowerCAmelCase__) snake_case_ , snake_case_ , snake_case_ , snake_case_ = reshaped_hidden_states[0].shape snake_case_ = ( 
reshaped_hidden_states[0].view(lowerCAmelCase__, lowerCAmelCase__, height * width).permute(0, 2, 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], ) def a_ ( self) -> Tuple: snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: snake_case_ = True self.check_hidden_states_output(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ = True self.check_hidden_states_output(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Optional[Any]: snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = 3 snake_case_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case_ = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) snake_case_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: snake_case_ = True self.check_hidden_states_output(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ = True self.check_hidden_states_output(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, (padded_height, padded_width)) @slow def a_ ( self) -> List[str]: for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = FocalNetModel.from_pretrained(lowerCAmelCase__) self.assertIsNotNone(lowerCAmelCase__) def a_ ( self) -> Union[str, Any]: snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = _config_zero_init(lowerCAmelCase__) for model_class in self.all_model_classes: snake_case_ = model_class(config=lowerCAmelCase__) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized', ) @require_vision @require_torch class UpperCamelCase ( unittest.TestCase ): @cached_property def a_ ( self) -> Union[str, Any]: # TODO update organization return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny') if is_vision_available() else None @slow def a_ ( self) -> str: snake_case_ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny').to(lowerCAmelCase__) snake_case_ = self.default_image_processor snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') snake_case_ = image_processor(images=lowerCAmelCase__, return_tensors='pt').to(lowerCAmelCase__) # forward pass with torch.no_grad(): snake_case_ = model(**lowerCAmelCase__) # verify the logits snake_case_ = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, lowerCAmelCase__) snake_case_ = torch.tensor([0.2166, -0.4368, 0.2191]).to(lowerCAmelCase__) 
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCAmelCase__, atol=1e-4)) self.assertTrue(outputs.logits.argmax(dim=-1).item(), 281) @require_torch class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = (FocalNetBackbone,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ = FocalNetConfig SCREAMING_SNAKE_CASE_ = False def a_ ( self) -> int: snake_case_ = FocalNetModelTester(self)
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: __UpperCamelCase = None __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase = { '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase = { '''facebook/nllb-large-en-ro''': 1024, '''facebook/nllb-200-distilled-600M''': 1024, } # fmt: off __UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', 
'''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = NllbTokenizer SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=False, **lowerCAmelCase__, ) -> List[str]: # Mask token behave like a normal word, i.e. include the space before it snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token snake_case_ = legacy_behaviour super().__init__( vocab_file=lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, src_lang=lowerCAmelCase__, tgt_lang=lowerCAmelCase__, additional_special_tokens=lowerCAmelCase__, legacy_behaviour=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = vocab_file snake_case_ = False if not self.vocab_file else True snake_case_ = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens}) snake_case_ = { lang_code: self.convert_tokens_to_ids(lowerCAmelCase__) for lang_code in FAIRSEQ_LANGUAGE_CODES } snake_case_ = src_lang if src_lang is not None else 'eng_Latn' snake_case_ = self.convert_tokens_to_ids(self._src_lang) snake_case_ = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def a_ ( self) -> str: return self._src_lang @src_lang.setter def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> str: if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model') snake_case_ = src_lang snake_case_ = self(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__) snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__) snake_case_ = tgt_lang_id return inputs def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = "eng_Latn", lowerCAmelCase__ = None, lowerCAmelCase__ = "fra_Latn", **lowerCAmelCase__, ) -> BatchEncoding: snake_case_ = src_lang snake_case_ = tgt_lang return super().prepare_seqaseq_batch(lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self) -> List[Any]: return self.set_src_lang_special_tokens(self.src_lang) def a_ ( self) -> Tuple: return self.set_tgt_lang_special_tokens(self.tgt_lang) def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__) if self.legacy_behaviour: snake_case_ = [] snake_case_ = [self.eos_token_id, self.cur_lang_code] else: snake_case_ = [self.cur_lang_code] snake_case_ = [self.eos_token_id] snake_case_ = self.convert_ids_to_tokens(self.prefix_tokens) snake_case_ = self.convert_ids_to_tokens(self.suffix_tokens) snake_case_ = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), ) def a_ ( self, lowerCAmelCase__) -> None: snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__) if self.legacy_behaviour: snake_case_ = [] snake_case_ = [self.eos_token_id, self.cur_lang_code] else: snake_case_ = [self.cur_lang_code] snake_case_ = [self.eos_token_id] snake_case_ = self.convert_ids_to_tokens(self.prefix_tokens) snake_case_ = self.convert_ids_to_tokens(self.suffix_tokens) snake_case_ = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + 
self.suffix_tokens)), ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(lowerCAmelCase__): logger.error(f'Vocabulary path ({save_directory}) should be a directory.') return snake_case_ = os.path.join( lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__): copyfile(self.vocab_file, lowerCAmelCase__) return (out_vocab_file,)
"""simple docstring""" import math import sys def UpperCAmelCase ( UpperCAmelCase ) -> int: if number != int(UpperCAmelCase ): raise ValueError('the value of input must be a natural number' ) if number < 0: raise ValueError('the value of input must not be a negative number' ) if number == 0: return 1 snake_case_ = [-1] * (number + 1) snake_case_ = 0 for i in range(1 , number + 1 ): snake_case_ = sys.maxsize snake_case_ = int(math.sqrt(UpperCAmelCase ) ) for j in range(1 , root + 1 ): snake_case_ = 1 + answers[i - (j**2)] snake_case_ = min(UpperCAmelCase , UpperCAmelCase ) snake_case_ = answer return answers[number] if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = '''T5Config''' class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "mt5" SCREAMING_SNAKE_CASE_ = MTaConfig class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "mt5" SCREAMING_SNAKE_CASE_ = MTaConfig class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "mt5" SCREAMING_SNAKE_CASE_ = MTaConfig
"""simple docstring""" from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase = { '''vocab_file''': { '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json''' }, '''merges_file''': { '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt''' }, } __UpperCamelCase = {'''allegro/herbert-base-cased''': 514} __UpperCamelCase = {} class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = HerbertTokenizer def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__="</s>", **lowerCAmelCase__, ) -> int: super().__init__( lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, **lowerCAmelCase__, ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase__, token_ids_a=lowerCAmelCase__, already_has_special_tokens=lowerCAmelCase__) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase__)) + [1] return [1] + ([0] * len(lowerCAmelCase__)) + [1] + ([0] * len(lowerCAmelCase__)) + [1] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__) return tuple(lowerCAmelCase__)
"""simple docstring""" import argparse __UpperCamelCase = '''docs/source/_static/js/custom.js''' def UpperCAmelCase ( UpperCAmelCase ) -> int: with open(UpperCAmelCase , encoding='utf-8' , newline='\n' ) as f: snake_case_ = f.readlines() snake_case_ = 0 # First let's put the right version while not lines[index].startswith('const stableVersion =' ): index += 1 snake_case_ = f'const stableVersion = "v{version}"\n' # Then update the dictionary while not lines[index].startswith('const versionMapping = {' ): index += 1 # We go until the end while not lines[index].startswith('}' ): index += 1 # We add the new version at the end lines[index - 1] += f' "v{version}": "v{version}",\n' with open(UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(UpperCAmelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--version''', help='''Release version.''') __UpperCamelCase = parser.parse_args() update_custom_js(args.version)
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def UpperCAmelCase ( ) -> Generator[int, None, None]: snake_case_ = {} snake_case_ = 2 while True: snake_case_ = factor_map.pop(UpperCAmelCase , UpperCAmelCase ) if factor: snake_case_ = factor + prime while x in factor_map: x += factor snake_case_ = factor else: snake_case_ = prime yield prime prime += 1 def UpperCAmelCase ( UpperCAmelCase = 1e10 ) -> int: snake_case_ = sieve() snake_case_ = 1 while True: snake_case_ = next(UpperCAmelCase ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(UpperCAmelCase ) n += 2 if __name__ == "__main__": print(solution())
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Any class UpperCamelCase : def __init__( self, lowerCAmelCase__) -> Optional[int]: snake_case_ = data snake_case_ = None class UpperCamelCase : def __init__( self) -> Dict: snake_case_ = None snake_case_ = None def __iter__( self) -> Iterator[Any]: snake_case_ = self.head while self.head: yield node.data snake_case_ = node.next if node == self.head: break def __len__( self) -> int: return sum(1 for _ in self) def __repr__( self) -> str: return "->".join(str(lowerCAmelCase__) for item in iter(self)) def a_ ( self, lowerCAmelCase__) -> None: self.insert_nth(len(self), lowerCAmelCase__) def a_ ( self, lowerCAmelCase__) -> None: self.insert_nth(0, lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> None: if index < 0 or index > len(self): raise IndexError('list index out of range.') snake_case_ = Node(lowerCAmelCase__) if self.head is None: snake_case_ = new_node # first node points itself snake_case_ = snake_case_ = new_node elif index == 0: # insert at head snake_case_ = self.head snake_case_ = snake_case_ = new_node else: snake_case_ = self.head for _ in range(index - 1): snake_case_ = temp.next snake_case_ = temp.next snake_case_ = new_node if index == len(self) - 1: # insert at tail snake_case_ = new_node def a_ ( self) -> str: return self.delete_nth(0) def a_ ( self) -> Any: return self.delete_nth(len(self) - 1) def a_ ( self, lowerCAmelCase__ = 0) -> Any: if not 0 <= index < len(self): raise IndexError('list index out of range.') snake_case_ = self.head if self.head == self.tail: # just one node snake_case_ = snake_case_ = None elif index == 0: # delete head node snake_case_ = self.tail.next.next snake_case_ = self.head.next else: snake_case_ = self.head for _ in range(index - 1): snake_case_ = temp.next snake_case_ = temp.next snake_case_ = temp.next.next if index == len(self) - 1: # delete at tail snake_case_ = temp return delete_node.data def a_ ( self) -> bool: return len(self) == 0 def UpperCAmelCase ( ) -> None: snake_case_ = CircularLinkedList() assert len(UpperCAmelCase ) == 0 assert circular_linked_list.is_empty() is True assert str(UpperCAmelCase ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(UpperCAmelCase ) == i circular_linked_list.insert_nth(UpperCAmelCase , i + 1 ) assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) ) circular_linked_list.insert_tail(6 ) assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 7 ) ) circular_linked_list.insert_head(0 ) assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(0 , 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 , 3 ) assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) ) 
assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
69
"""simple docstring""" import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __UpperCamelCase = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. __UpperCamelCase = direct_transformers_import(PATH_TO_TRANSFORMERS) __UpperCamelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` __UpperCamelCase = re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''') __UpperCamelCase = { '''DecisionTransformerConfig''', '''EncoderDecoderConfig''', '''MusicgenConfig''', '''RagConfig''', '''SpeechEncoderDecoderConfig''', '''TimmBackboneConfig''', '''VisionEncoderDecoderConfig''', '''VisionTextDualEncoderConfig''', '''LlamaConfig''', } def UpperCAmelCase ( UpperCAmelCase ) -> List[Any]: snake_case_ = None # source code of `config_class` snake_case_ = inspect.getsource(UpperCAmelCase ) snake_case_ = _re_checkpoint.findall(UpperCAmelCase ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith('/' ): snake_case_ = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link snake_case_ = f'https://huggingface.co/{ckpt_name}' if ckpt_link == ckpt_link_from_name: snake_case_ = ckpt_name break return checkpoint def UpperCAmelCase ( ) -> Union[str, Any]: snake_case_ = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue snake_case_ = get_checkpoint_from_config_class(UpperCAmelCase ) snake_case_ = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(UpperCAmelCase ) if len(UpperCAmelCase ) > 0: snake_case_ = '\n'.join(sorted(UpperCAmelCase ) ) raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
69
1
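The heart of the checkpoint check above is a regex over each config class's source: find every markdown link of the form `[name](https://huggingface.co/...)` and confirm the name and URL agree. A self-contained sketch of just that step, run over a made-up docstring:

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

doc = """Example usage: [bert-base-uncased](https://huggingface.co/bert-base-uncased)
and a mismatched one: [foo](https://huggingface.co/bar)."""

for name, link in _re_checkpoint.findall(doc):
    ok = link.rstrip("/") == f"https://huggingface.co/{name}"
    print(f"{name}: {'valid' if ok else 'mismatch'}")
# bert-base-uncased: valid
# foo: mismatch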
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import _LazyModule __UpperCamelCase = {'''tokenization_tapex''': ['''TapexTokenizer''']} if TYPE_CHECKING: from .tokenization_tapex import TapexTokenizer else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
69
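The `_LazyModule` rows throughout this dump all follow the same pattern: declare the public names in an `_import_structure` dict and defer the real import until the attribute is first touched. A minimal stand-alone sketch of that idea using PEP 562 module `__getattr__` instead of transformers' internal helper; the `lazy_pkg` package name is hypothetical.

# lazy_pkg/__init__.py — hypothetical package, not from the sample
import importlib

_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

# invert the table: exported name -> submodule that defines it
_name_to_module = {
    name: module for module, names in _import_structure.items() for name in names
}


def __getattr__(name):  # PEP 562: called only when normal lookup fails
    if name in _name_to_module:
        module = importlib.import_module(f".{_name_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


def __dir__():
    return sorted(list(globals()) + list(_name_to_module))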
"""simple docstring""" from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean __UpperCamelCase = 0 __UpperCamelCase = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __UpperCamelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right __UpperCamelCase = tuple[int, int] class UpperCamelCase : def __init__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> None: snake_case_ = pos_x snake_case_ = pos_y snake_case_ = (pos_y, pos_x) snake_case_ = goal_x snake_case_ = goal_y snake_case_ = g_cost snake_case_ = parent snake_case_ = self.calculate_heuristic() snake_case_ = self.g_cost + self.h_cost def a_ ( self) -> float: snake_case_ = self.pos_x - self.goal_x snake_case_ = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(lowerCAmelCase__) + abs(lowerCAmelCase__) else: return sqrt(dy**2 + dx**2) def __lt__( self, lowerCAmelCase__) -> bool: return self.f_cost < other.f_cost class UpperCamelCase : def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> Union[str, Any]: snake_case_ = Node(start[1], start[0], goal[1], goal[0], 0, lowerCAmelCase__) snake_case_ = Node(goal[1], goal[0], goal[1], goal[0], 9_9999, lowerCAmelCase__) snake_case_ = [self.start] snake_case_ = [] snake_case_ = False def a_ ( self) -> list[TPosition]: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() snake_case_ = self.open_nodes.pop(0) if current_node.pos == self.target.pos: return self.retrace_path(lowerCAmelCase__) self.closed_nodes.append(lowerCAmelCase__) snake_case_ = self.get_successors(lowerCAmelCase__) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowerCAmelCase__) else: # retrieve the best current path snake_case_ = self.open_nodes.pop(self.open_nodes.index(lowerCAmelCase__)) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowerCAmelCase__) else: self.open_nodes.append(lowerCAmelCase__) return [self.start.pos] def a_ ( self, lowerCAmelCase__) -> list[Node]: snake_case_ = [] for action in delta: snake_case_ = parent.pos_x + action[1] snake_case_ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(lowerCAmelCase__) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowerCAmelCase__, lowerCAmelCase__, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, lowerCAmelCase__, )) return successors def a_ ( self, lowerCAmelCase__) -> list[TPosition]: snake_case_ = node snake_case_ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x)) snake_case_ = current_node.parent path.reverse() return path class UpperCamelCase : def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> None: snake_case_ = AStar(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = AStar(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = False def a_ ( self) -> list[TPosition]: while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() snake_case_ = self.fwd_astar.open_nodes.pop(0) snake_case_ = self.bwd_astar.open_nodes.pop(0) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( lowerCAmelCase__, lowerCAmelCase__) 
self.fwd_astar.closed_nodes.append(lowerCAmelCase__) self.bwd_astar.closed_nodes.append(lowerCAmelCase__) snake_case_ = current_bwd_node snake_case_ = current_fwd_node snake_case_ = { self.fwd_astar: self.fwd_astar.get_successors(lowerCAmelCase__), self.bwd_astar: self.bwd_astar.get_successors(lowerCAmelCase__), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(lowerCAmelCase__) else: # retrieve the best current path snake_case_ = astar.open_nodes.pop( astar.open_nodes.index(lowerCAmelCase__)) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(lowerCAmelCase__) else: astar.open_nodes.append(lowerCAmelCase__) return [self.fwd_astar.start.pos] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> list[TPosition]: snake_case_ = self.fwd_astar.retrace_path(lowerCAmelCase__) snake_case_ = self.bwd_astar.retrace_path(lowerCAmelCase__) bwd_path.pop() bwd_path.reverse() snake_case_ = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] __UpperCamelCase = (0, 0) __UpperCamelCase = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __UpperCamelCase = time.time() __UpperCamelCase = AStar(init, goal) __UpperCamelCase = a_star.search() __UpperCamelCase = time.time() - start_time print(F"""AStar execution time = {end_time:f} seconds""") __UpperCamelCase = time.time() __UpperCamelCase = BidirectionalAStar(init, goal) __UpperCamelCase = time.time() - bd_start_time print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
69
1
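The A* row above keeps its open list ordered by re-sorting it on every pop; the more common formulation uses a binary heap. A compact sketch under that substitution (Manhattan heuristic, same 0-is-free / 1-is-obstacle grid convention; this is not the sample's class layout):

import heapq


def a_star(grid, start, goal):
    """start/goal are (row, col); returns a path as a list of cells, or []."""
    h = lambda p: abs(p[0] - goal[0]) + abs(p[1] - goal[1])
    open_heap = [(h(start), 0, start)]  # entries are (f, g, position)
    parents, best_g = {start: None}, {start: 0}
    while open_heap:
        _, g, pos = heapq.heappop(open_heap)
        if pos == goal:  # rebuild the path by walking parents back to start
            path = []
            while pos is not None:
                path.append(pos)
                pos = parents[pos]
            return path[::-1]
        for dr, dc in ((-1, 0), (0, -1), (1, 0), (0, 1)):  # up, left, down, right
            r, c = pos[0] + dr, pos[1] + dc
            if 0 <= r < len(grid) and 0 <= c < len(grid[0]) and grid[r][c] == 0:
                if g + 1 < best_g.get((r, c), float("inf")):
                    best_g[(r, c)] = g + 1
                    parents[(r, c)] = pos
                    heapq.heappush(open_heap, (g + 1 + h((r, c)), g + 1, (r, c)))
    return []


demo = [[0, 0, 0], [1, 1, 0], [0, 0, 0]]
print(a_star(demo, (0, 0), (2, 0)))
# [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]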
"""simple docstring""" __UpperCamelCase = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} __UpperCamelCase = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> list[int]: snake_case_ = True snake_case_ = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) order.append(UpperCAmelCase ) return order def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> list[int]: snake_case_ = True snake_case_ = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return component def UpperCAmelCase ( UpperCAmelCase ) -> list[list[int]]: snake_case_ = len(UpperCAmelCase ) * [False] snake_case_ = {vert: [] for vert in range(len(UpperCAmelCase ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(UpperCAmelCase ) snake_case_ = [] for i, was_visited in enumerate(UpperCAmelCase ): if not was_visited: order += topology_sort(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) snake_case_ = [] snake_case_ = len(UpperCAmelCase ) * [False] for i in range(len(UpperCAmelCase ) ): snake_case_ = order[len(UpperCAmelCase ) - i - 1] if not visited[vert]: snake_case_ = find_components(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) components_list.append(UpperCAmelCase ) return components_list
69
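With the names restored above, the two module-level sample graphs make a quick smoke test; tracing Kosaraju's two passes by hand gives the groupings shown in the comments.

print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]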
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int: while a != 0: snake_case_ , snake_case_ = b % a, a return b def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int: if gcd(UpperCAmelCase , UpperCAmelCase ) != 1: snake_case_ = f'mod inverse of {a!r} and {m!r} does not exist' raise ValueError(UpperCAmelCase ) snake_case_ , snake_case_ , snake_case_ = 1, 0, a snake_case_ , snake_case_ , snake_case_ = 0, 1, m while va != 0: snake_case_ = ua // va snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
69
1
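Since Python 3.8 the built-in three-argument `pow` accepts a negative exponent with a modulus and computes the modular inverse directly, which makes a convenient cross-check for the extended-Euclidean version restored above:

inv = pow(7, -1, 26)  # raises ValueError when gcd(a, m) != 1
assert (7 * inv) % 26 == 1
assert find_mod_inverse(7, 26) == inv == 15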
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __UpperCamelCase = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SwinForImageClassification''', '''SwinForMaskedImageModeling''', '''SwinModel''', '''SwinPreTrainedModel''', '''SwinBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFSwinForImageClassification''', '''TFSwinForMaskedImageModeling''', '''TFSwinModel''', '''TFSwinPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
69
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCamelCase = { '''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''], '''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ['''BertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BertForMaskedLM''', '''BertForMultipleChoice''', '''BertForNextSentencePrediction''', '''BertForPreTraining''', '''BertForQuestionAnswering''', '''BertForSequenceClassification''', '''BertForTokenClassification''', '''BertLayer''', '''BertLMHeadModel''', '''BertModel''', '''BertPreTrainedModel''', '''load_tf_weights_in_bert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFBertEmbeddings''', '''TFBertForMaskedLM''', '''TFBertForMultipleChoice''', '''TFBertForNextSentencePrediction''', '''TFBertForPreTraining''', '''TFBertForQuestionAnswering''', '''TFBertForSequenceClassification''', '''TFBertForTokenClassification''', '''TFBertLMHeadModel''', '''TFBertMainLayer''', '''TFBertModel''', '''TFBertPreTrainedModel''', ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ['''TFBertTokenizer'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''FlaxBertForCausalLM''', '''FlaxBertForMaskedLM''', '''FlaxBertForMultipleChoice''', '''FlaxBertForNextSentencePrediction''', '''FlaxBertForPreTraining''', '''FlaxBertForQuestionAnswering''', '''FlaxBertForSequenceClassification''', '''FlaxBertForTokenClassification''', '''FlaxBertModel''', '''FlaxBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, 
TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
69
1
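Both transformers `__init__` rows above gate each backend behind the same try/except dance. The shape of that guard, reduced to its essentials — here `is_torch_available` is a stand-in for the library's cached helper of the same name:

import importlib.util


def is_torch_available() -> bool:
    # cheap presence check; the real helper also caches and honours env vars
    return importlib.util.find_spec("torch") is not None


class OptionalDependencyNotAvailable(BaseException):
    """Raised internally to signal that a backend is missing."""


_import_structure = {"configuration": ["Config"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch-only names simply stay out of the public surface
else:
    _import_structure["modeling"] = ["Model", "PreTrainedModel"]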
"""simple docstring""" import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = GPTaTokenizer SCREAMING_SNAKE_CASE_ = GPTaTokenizerFast SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = {"add_prefix_space": True} SCREAMING_SNAKE_CASE_ = False def a_ ( self) -> Dict: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case_ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] snake_case_ = dict(zip(lowerCAmelCase__, range(len(lowerCAmelCase__)))) snake_case_ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] snake_case_ = {'unk_token': '<unk>'} snake_case_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file']) snake_case_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file, 'w', encoding='utf-8') as fp: fp.write(json.dumps(lowerCAmelCase__) + '\n') with open(self.merges_file, 'w', encoding='utf-8') as fp: fp.write('\n'.join(lowerCAmelCase__)) def a_ ( self, **lowerCAmelCase__) -> int: kwargs.update(self.special_tokens_map) return GPTaTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__) def a_ ( self, **lowerCAmelCase__) -> Optional[int]: kwargs.update(self.special_tokens_map) return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__) -> Optional[int]: snake_case_ = 'lower newer' snake_case_ = 'lower newer' return input_text, output_text def a_ ( self) -> Union[str, Any]: snake_case_ = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map) snake_case_ = 'lower newer' snake_case_ = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] snake_case_ = tokenizer.tokenize(lowerCAmelCase__, add_prefix_space=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokens + [tokenizer.unk_token] snake_case_ = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__), lowerCAmelCase__) def a_ ( self) -> Optional[int]: if not self.test_rust_tokenizer: return snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__) snake_case_ = 'lower newer' # Testing tokenization snake_case_ = tokenizer.tokenize(lowerCAmelCase__, add_prefix_space=lowerCAmelCase__) snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) # Testing conversion to ids without special tokens snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) # Testing conversion to ids with special tokens snake_case_ = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_prefix_space=lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) # Testing the 
unknown token snake_case_ = tokens + [rust_tokenizer.unk_token] snake_case_ = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase__), lowerCAmelCase__) def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> List[Any]: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def a_ ( self, lowerCAmelCase__=15) -> List[str]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'): snake_case_ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__, **lowerCAmelCase__) # Simple input snake_case_ = 'This is a simple input' snake_case_ = ['This is a simple input 1', 'This is a simple input 2'] snake_case_ = ('This is a simple input', 'This is a pair') snake_case_ = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(lowerCAmelCase__, tokenizer_r.encode, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length') # Simple input self.assertRaises(lowerCAmelCase__, tokenizer_r.encode_plus, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length') # Simple input self.assertRaises( lowerCAmelCase__, tokenizer_r.batch_encode_plus, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length', ) # Pair input self.assertRaises(lowerCAmelCase__, tokenizer_r.encode, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length') # Pair input self.assertRaises(lowerCAmelCase__, tokenizer_r.encode_plus, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length') # Pair input self.assertRaises( lowerCAmelCase__, tokenizer_r.batch_encode_plus, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length', ) def a_ ( self) -> int: snake_case_ = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token='<pad>') # Simple input snake_case_ = 'This is a simple input' snake_case_ = ['This is a simple input looooooooong', 'This is a simple input'] snake_case_ = ('This is a simple input', 'This is a pair') snake_case_ = [ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] snake_case_ = tokenizer.pad_token_id snake_case_ = tokenizer(lowerCAmelCase__, padding='max_length', max_length=30, return_tensors='np') snake_case_ = tokenizer(lowerCAmelCase__, padding=lowerCAmelCase__, truncate=lowerCAmelCase__, return_tensors='np') snake_case_ = tokenizer(*lowerCAmelCase__, padding='max_length', max_length=60, return_tensors='np') snake_case_ = tokenizer(lowerCAmelCase__, padding=lowerCAmelCase__, truncate=lowerCAmelCase__, return_tensors='np') # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1], 30) self.assertTrue(pad_token_id in out_s['input_ids']) self.assertTrue(0 in out_s['attention_mask']) # s2 # test automatic padding self.assertEqual(out_sa['input_ids'].shape[-1], 33) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0]) self.assertFalse(0 in out_sa['attention_mask'][0]) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1]) self.assertTrue(0 in out_sa['attention_mask'][1]) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1], 60) self.assertTrue(pad_token_id in out_p['input_ids']) 
self.assertTrue(0 in out_p['attention_mask']) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1], 52) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0]) self.assertFalse(0 in out_pa['attention_mask'][0]) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1]) self.assertTrue(0 in out_pa['attention_mask'][1]) def a_ ( self) -> Any: snake_case_ = '$$$' snake_case_ = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=lowerCAmelCase__, add_bos_token=lowerCAmelCase__) snake_case_ = 'This is a simple input' snake_case_ = ['This is a simple input 1', 'This is a simple input 2'] snake_case_ = tokenizer.bos_token_id snake_case_ = tokenizer(lowerCAmelCase__) snake_case_ = tokenizer(lowerCAmelCase__) self.assertEqual(out_s.input_ids[0], lowerCAmelCase__) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids)) snake_case_ = tokenizer.decode(out_s.input_ids) snake_case_ = tokenizer.batch_decode(out_sa.input_ids) self.assertEqual(decode_s.split()[0], lowerCAmelCase__) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa)) def a_ ( self) -> int: pass def a_ ( self) -> Tuple: # TODO: change to self.get_tokenizers() when the fast version is implemented snake_case_ = [self.get_tokenizer(do_lower_case=lowerCAmelCase__, add_bos_token=lowerCAmelCase__)] for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}'): snake_case_ = 'Encode this.' snake_case_ = 'This one too please.' snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) encoded_sequence += tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) snake_case_ = tokenizer.encode_plus( lowerCAmelCase__, lowerCAmelCase__, add_special_tokens=lowerCAmelCase__, return_special_tokens_mask=lowerCAmelCase__, ) snake_case_ = encoded_sequence_dict['input_ids'] snake_case_ = encoded_sequence_dict['special_tokens_mask'] self.assertEqual(len(lowerCAmelCase__), len(lowerCAmelCase__)) snake_case_ = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(lowerCAmelCase__) ] snake_case_ = [x for x in filtered_sequence if x is not None] self.assertEqual(lowerCAmelCase__, lowerCAmelCase__) @require_tokenizers class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> Optional[int]: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 snake_case_ = AutoTokenizer.from_pretrained('facebook/opt-350m', from_slow=lowerCAmelCase__) snake_case_ = 'A photo of a cat' snake_case_ = tokenizer.encode( lowerCAmelCase__, ) self.assertEqual(lowerCAmelCase__, [2, 250, 1345, 9, 10, 4758]) tokenizer.save_pretrained('test_opt') snake_case_ = AutoTokenizer.from_pretrained('./test_opt') snake_case_ = tokenizer.encode( lowerCAmelCase__, ) self.assertEqual(lowerCAmelCase__, [2, 250, 1345, 9, 10, 4758]) def a_ ( self) -> Tuple: snake_case_ = AutoTokenizer.from_pretrained('facebook/opt-350m', use_slow=lowerCAmelCase__) snake_case_ = 'A photo of a cat' snake_case_ = tokenizer.encode( lowerCAmelCase__, ) # Same as above self.assertEqual(lowerCAmelCase__, [2, 250, 1345, 9, 10, 4758]) @unittest.skip('This test is failing because of a bug in the fast tokenizer') def a_ ( self) -> Optional[int]: snake_case_ = AutoTokenizer.from_pretrained('facebook/opt-350m', from_slow=lowerCAmelCase__) snake_case_ = 'bos' 
snake_case_ = tokenizer.get_vocab()['bos'] snake_case_ = 'A photo of a cat' snake_case_ = tokenizer.encode( lowerCAmelCase__, ) # We changed the bos token self.assertEqual(lowerCAmelCase__, [3_1957, 250, 1345, 9, 10, 4758]) tokenizer.save_pretrained('./tok') snake_case_ = AutoTokenizer.from_pretrained('./tok') self.assertTrue(tokenizer.is_fast) snake_case_ = tokenizer.encode( lowerCAmelCase__, ) self.assertEqual(lowerCAmelCase__, [3_1957, 250, 1345, 9, 10, 4758])
69
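A note on the `\u0120` characters that dominate the toy vocab in the test above: byte-level BPE remaps the space byte onto the printable marker `Ġ`, so `\u0120low` is literally " low". A quick probe with the real tokenizer (downloading the `gpt2` checkpoint is assumed; exact token splits depend on the vocab):

from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2")
print(tok.tokenize("lower newer"))   # the second word comes back prefixed with 'Ġ'
print(tok.tokenize(" lower newer"))  # with a leading space, both words carry 'Ġ'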
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } __UpperCamelCase = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: for attribute in key.split('.' ): snake_case_ = getattr(UpperCAmelCase , UpperCAmelCase ) if weight_type is not None: snake_case_ = getattr(UpperCAmelCase , UpperCAmelCase ).shape else: snake_case_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value elif weight_type == "running_mean": snake_case_ = value elif weight_type == "running_var": snake_case_ = value elif weight_type == "num_batches_tracked": snake_case_ = value elif weight_type == "inv_freq": snake_case_ = value else: snake_case_ = value logger.info(f'{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: snake_case_ = [] snake_case_ = fairseq_model.state_dict() snake_case_ = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): snake_case_ = False if "conv_layers" in name: load_conv_layer( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == 'group' , ) snake_case_ = True else: for key, mapped_key in MAPPING.items(): snake_case_ = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(UpperCAmelCase )[0].split('.' )[-2] snake_case_ = mapped_key.replace('*' , UpperCAmelCase ) if "pos_bias_u" in name: snake_case_ = None elif "pos_bias_v" in name: snake_case_ = None elif "weight_g" in name: snake_case_ = 'weight_g' elif "weight_v" in name: snake_case_ = 'weight_v' elif "bias" in name: snake_case_ = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case_ = 'weight' elif "running_mean" in name: snake_case_ = 'running_mean' elif "inv_freq" in name: snake_case_ = 'inv_freq' elif "running_var" in name: snake_case_ = 'running_var' elif "num_batches_tracked" in name: snake_case_ = 'num_batches_tracked' else: snake_case_ = None set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) continue if not is_used: unused_weights.append(UpperCAmelCase ) logger.warning(f'Unused weights: {unused_weights}' ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: snake_case_ = full_name.split('conv_layers.' )[-1] snake_case_ = name.split('.' ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) snake_case_ = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) snake_case_ = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) snake_case_ = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) snake_case_ = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(UpperCAmelCase ) @torch.no_grad() def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=True ) -> str: if config_path is not None: snake_case_ = WavaVecaConformerConfig.from_pretrained(UpperCAmelCase , hidden_act='swish' ) else: snake_case_ = WavaVecaConformerConfig() if "rope" in checkpoint_path: snake_case_ = 'rotary' if is_finetuned: if dict_path: snake_case_ = Dictionary.load(UpperCAmelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ = target_dict.pad_index snake_case_ = target_dict.bos_index snake_case_ = target_dict.eos_index snake_case_ = len(target_dict.symbols ) snake_case_ = os.path.join(UpperCAmelCase , 'vocab.json' ) if not os.path.isdir(UpperCAmelCase ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCAmelCase ) ) return os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase ) snake_case_ = target_dict.indices # fairseq has the <pad> and <s> switched snake_case_ = 0 snake_case_ = 1 with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(UpperCAmelCase , UpperCAmelCase ) snake_case_ = WavaVecaCTCTokenizer( UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=UpperCAmelCase , ) snake_case_ = True if config.feat_extract_norm == 'layer' else False snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase , ) snake_case_ = WavaVecaProcessor(feature_extractor=UpperCAmelCase , tokenizer=UpperCAmelCase ) processor.save_pretrained(UpperCAmelCase ) snake_case_ = WavaVecaConformerForCTC(UpperCAmelCase ) else: snake_case_ = WavaVecaConformerForPreTraining(UpperCAmelCase ) if is_finetuned: snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: snake_case_ = argparse.Namespace(task='audio_pretraining' ) snake_case_ = fairseq.tasks.setup_task(UpperCAmelCase ) snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCAmelCase ) snake_case_ = model[0].eval() recursively_load_weights(UpperCAmelCase , UpperCAmelCase , not is_finetuned ) hf_wavavec.save_pretrained(UpperCAmelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) __UpperCamelCase = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
69
1
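The conversion script above is, at heart, a key-renaming loop: walk the source state dict, map each fairseq name onto the Hugging Face name (expanding the `*` layer wildcard), and copy tensors across with shape checks. A framework-free sketch of that mechanic — the two mapping entries are taken from the script, but `map_key` and its wildcard handling are an illustrative simplification, not the script's exact logic:

import re

MAPPING = {
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
}


def map_key(fairseq_key):
    for old, new in MAPPING.items():
        if old in fairseq_key:
            if "*" in new:
                # recover the layer index from e.g. 'encoder.layers.3.ffn1.w_1'
                layer = re.search(r"layers\.(\d+)\.", fairseq_key).group(1)
                new = new.replace("*", layer)
            return new + fairseq_key.rsplit(old, 1)[1]
    return None  # unused weight — the real script logs these


print(map_key("encoder.layers.3.ffn1.w_1.weight"))
# encoder.layers.3.ffn1.intermediate_dense.weight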
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all MVP models at https://huggingface.co/models?filter=mvp __UpperCamelCase = { '''vocab_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''', }, '''added_tokens.json''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''', }, '''merges_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''', }, } __UpperCamelCase = { '''RUCAIBox/mvp''': 1024, } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = MvpTokenizer def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> List[str]: super().__init__( lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space: snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type')) snake_case_ = add_prefix_space snake_case_ = pre_tok_class(**lowerCAmelCase__) snake_case_ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` snake_case_ = 'post_processor' snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__) if tokenizer_component_instance: snake_case_ = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: snake_case_ = tuple(state['sep']) if "cls" in state: snake_case_ = tuple(state['cls']) snake_case_ = False if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space: snake_case_ = add_prefix_space snake_case_ = True if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets: snake_case_ = trim_offsets snake_case_ = True if changes_to_apply: snake_case_ = getattr(lowerCAmelCase__, state.pop('type')) snake_case_ = component_class(**lowerCAmelCase__) setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__) @property def a_ ( self) -> str: if self._mask_token is None: if self.verbose: logger.error('Using 
mask_token, but it is not set yet.') return None return str(self._mask_token) @mask_token.setter def a_ ( self, lowerCAmelCase__) -> str: snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else value snake_case_ = value def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding: snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.') return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding: snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.') return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__) return tuple(lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> Tuple: snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
69
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase ) -> list: if len(UpperCAmelCase ) <= 1: return [tuple(UpperCAmelCase )] snake_case_ = [] def generate(UpperCAmelCase , UpperCAmelCase ): snake_case_ = [0] * n res.append(tuple(UpperCAmelCase ) ) snake_case_ = 0 while i < n: if c[i] < i: if i % 2 == 0: snake_case_ , snake_case_ = arr[i], arr[0] else: snake_case_ , snake_case_ = arr[i], arr[c[i]] res.append(tuple(UpperCAmelCase ) ) c[i] += 1 snake_case_ = 0 else: snake_case_ = 0 i += 1 generate(len(UpperCAmelCase ) , UpperCAmelCase ) return res if __name__ == "__main__": __UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip() __UpperCamelCase = [int(item) for item in user_input.split(''',''')] print(heaps(arr))
69
1
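Heap's algorithm produces each permutation from the previous one by a single swap, so the output order differs from lexicographic enumeration but the set of permutations is identical. A quick sanity check against `itertools.permutations`, using the `heaps` function restored above:

from itertools import permutations

arr = [1, 2, 3]
out = heaps(list(arr))  # pass a copy: heaps mutates its input in place
assert sorted(out) == sorted(permutations(arr))
assert len(out) == 6  # 3! permutations, no duplicates
print(out[:3])  # [(1, 2, 3), (2, 1, 3), (3, 1, 2)]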
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __UpperCamelCase = abspath(join(dirname(dirname(dirname(__file__))), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def UpperCAmelCase ( UpperCAmelCase ) -> int: from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase ) -> int: from transformers.testing_utils import pytest_terminal_summary_main snake_case_ = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(UpperCAmelCase , id=UpperCAmelCase )
69
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer __UpperCamelCase = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast __UpperCamelCase = TaTokenizerFast __UpperCamelCase = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''MT5EncoderModel''', '''MT5ForConditionalGeneration''', '''MT5ForQuestionAnswering''', '''MT5Model''', '''MT5PreTrainedModel''', '''MT5Stack''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model'''] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys __UpperCamelCase = _LazyModule( __name__, globals()['''__file__'''], _import_structure, extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast}, module_spec=__spec__, )
69
1