code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self : int ) -> List[str]: """simple docstring""" __magic_name__ = tempfile.mkdtemp() __magic_name__ = BlipImageProcessor() __magic_name__ = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" ) __magic_name__ = BlipaProcessor(UpperCamelCase__ , UpperCamelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self : Optional[Any] , **UpperCamelCase__ : int ) -> int: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).tokenizer def _lowercase ( self : Dict , **UpperCamelCase__ : List[Any] ) -> List[Any]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).image_processor def _lowercase ( self : Optional[int] ) -> str: """simple docstring""" shutil.rmtree(self.tmpdirname ) def _lowercase ( self : Optional[int] ) -> List[Any]: """simple docstring""" __magic_name__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __magic_name__ = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self : str ) -> List[Any]: """simple docstring""" __magic_name__ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __magic_name__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __magic_name__ = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 ) __magic_name__ = BlipaProcessor.from_pretrained( 
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase__ ) def _lowercase ( self : str ) -> Dict: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = BlipaProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) __magic_name__ = self.prepare_image_inputs() __magic_name__ = image_processor(UpperCamelCase__ , return_tensors="""np""" ) __magic_name__ = processor(images=UpperCamelCase__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowercase ( self : Dict ) -> int: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = BlipaProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) __magic_name__ = """lower newer""" __magic_name__ = processor(text=UpperCamelCase__ ) __magic_name__ = tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowercase ( self : Dict ) -> List[str]: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = BlipaProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) __magic_name__ = """lower newer""" __magic_name__ = self.prepare_image_inputs() __magic_name__ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", 
"""input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase__ ): processor() def _lowercase ( self : Optional[int] ) -> List[str]: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = BlipaProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) __magic_name__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __magic_name__ = processor.batch_decode(UpperCamelCase__ ) __magic_name__ = tokenizer.batch_decode(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = BlipaProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) __magic_name__ = """lower newer""" __magic_name__ = self.prepare_image_inputs() __magic_name__ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
76
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_ ): '''simple docstring''' __magic_name__ = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) __magic_name__ = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""", A_ ) if matches: __magic_name__ = float(matches[1] ) __magic_name__ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". __magic_name__ = 1001 __magic_name__ = """imagenet-1k-id2label.json""" __magic_name__ = """huggingface/label-files""" __magic_name__ = json.load(open(hf_hub_download(A_, A_, repo_type="""dataset""" ), """r""" ) ) __magic_name__ = {int(A_ ) + 1: v for k, v in idalabel.items()} __magic_name__ = """background""" __magic_name__ = idalabel __magic_name__ = {v: k for k, v in idalabel.items()} return config def a__ ( ): '''simple docstring''' __magic_name__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" __magic_name__ = Image.open(requests.get(A_, stream=A_ ).raw ) return im @torch.no_grad() def a__ ( A_, A_, A_, A_=False ): '''simple docstring''' __magic_name__ = get_mobilenet_va_config(A_ ) # Load 🤗 model __magic_name__ = MobileNetVaForImageClassification(A_ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(A_, A_, A_ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor __magic_name__ = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size}, size={"""shortest_edge""": 
config.image_size + 32}, ) __magic_name__ = image_processor(images=prepare_img(), return_tensors="""pt""" ) __magic_name__ = model(**A_ ) __magic_name__ = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": __magic_name__ = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": __magic_name__ = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: __magic_name__ = None if expected_logits is not None: assert torch.allclose(logits[0, :3], A_, atol=1e-4 ) Path(A_ ).mkdir(exist_ok=A_ ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A_ ) if push_to_hub: print("""Pushing to the hub...""" ) __magic_name__ = """google/""" + model_name image_processor.push_to_hub(A_ ) model.push_to_hub(A_ ) if __name__ == "__main__": __lowerCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='mobilenet_v1_1.0_224', type=str, help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.', ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __lowerCAmelCase : str = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
76
1
import importlib.metadata from typing import Union from packaging.version import Version, parse from .constants import STR_OPERATION_TO_FUNC __lowerCAmelCase : Any = parse(importlib.metadata.version('torch')) def a__ ( A_, A_, A_ ): '''simple docstring''' if operation not in STR_OPERATION_TO_FUNC.keys(): raise ValueError(f'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' ) __magic_name__ = STR_OPERATION_TO_FUNC[operation] if isinstance(A_, A_ ): __magic_name__ = parse(importlib.metadata.version(A_ ) ) return operation(A_, parse(A_ ) ) def a__ ( A_, A_ ): '''simple docstring''' return compare_versions(A_, A_, A_ )
76
import collections import importlib.util import os import re from pathlib import Path __lowerCAmelCase : int = 'src/transformers' # Matches is_xxx_available() __lowerCAmelCase : Optional[int] = re.compile(R'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __lowerCAmelCase : Dict = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __lowerCAmelCase : int = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __lowerCAmelCase : Optional[Any] = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __lowerCAmelCase : Optional[Any] = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __lowerCAmelCase : Dict = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __lowerCAmelCase : List[str] = re.compile('^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __lowerCAmelCase : Optional[int] = re.compile('^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __lowerCAmelCase : List[str] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __lowerCAmelCase : int = re.compile(R'^\s*try:') # Catches a line with else: __lowerCAmelCase : Tuple = re.compile(R'^\s*else:') def a__ ( A_ ): '''simple docstring''' if _re_test_backend.search(A_ ) is None: return None __magic_name__ = [b[0] for b in _re_backend.findall(A_ )] backends.sort() return "_and_".join(A_ ) def a__ ( A_ ): '''simple docstring''' with open(A_, """r""", encoding="""utf-8""", newline="""\n""" ) as f: __magic_name__ = f.readlines() __magic_name__ = 0 while line_index < len(A_ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index 
+= 1 # If this is a traditional init, just return. if line_index >= len(A_ ): return None # First grab the objects without a specific backend in _import_structure __magic_name__ = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: __magic_name__ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(A_ ): __magic_name__ = _re_one_line_import_struct.search(A_ ).groups()[0] __magic_name__ = re.findall("""\[([^\]]+)\]""", A_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue __magic_name__ = _re_import_struct_key_value.search(A_ ) if single_line_import_search is not None: __magic_name__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(A_ ) > 0] objects.extend(A_ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 __magic_name__ = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. 
__magic_name__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __magic_name__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __magic_name__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): __magic_name__ = lines[line_index] if _re_import_struct_add_one.search(A_ ) is not None: objects.append(_re_import_struct_add_one.search(A_ ).groups()[0] ) elif _re_import_struct_add_many.search(A_ ) is not None: __magic_name__ = _re_import_struct_add_many.search(A_ ).groups()[0].split(""", """ ) __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_between_brackets.search(A_ ) is not None: __magic_name__ = _re_between_brackets.search(A_ ).groups()[0].split(""", """ ) __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_quote_object.search(A_ ) is not None: objects.append(_re_quote_object.search(A_ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 __magic_name__ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __magic_name__ = [] while ( line_index < len(A_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): __magic_name__ = lines[line_index] __magic_name__ = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 __magic_name__ = {"""none""": objects} # Let's 
continue with backend-specific objects while line_index < len(A_ ): # If the line is an if is_backend_available, we grab all objects associated. __magic_name__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __magic_name__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __magic_name__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): __magic_name__ = lines[line_index] __magic_name__ = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 __magic_name__ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def a__ ( A_, A_ ): '''simple docstring''' def find_duplicates(A_ ): return [k for k, v in collections.Counter(A_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __magic_name__ = [] for key in import_dict_objects.keys(): __magic_name__ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) __magic_name__ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __magic_name__ = """base imports""" if key == """none""" else f'''{key} backend''' errors.append(f'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f''' {a} 
in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def a__ ( ): '''simple docstring''' __magic_name__ = [] for root, _, files in os.walk(A_ ): if "__init__.py" in files: __magic_name__ = os.path.join(A_, """__init__.py""" ) __magic_name__ = parse_init(A_ ) if objects is not None: __magic_name__ = analyze_results(*A_ ) if len(A_ ) > 0: __magic_name__ = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(A_ ) ) if len(A_ ) > 0: raise ValueError("""\n\n""".join(A_ ) ) def a__ ( ): '''simple docstring''' __magic_name__ = [] for path, directories, files in os.walk(A_ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(A_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(A_ ) / folder).glob("""*.py""" ) ) ) == 0: continue __magic_name__ = str((Path(A_ ) / folder).relative_to(A_ ) ) __magic_name__ = short_path.replace(os.path.sep, """.""" ) submodules.append(A_ ) for fname in files: if fname == "__init__.py": continue __magic_name__ = str((Path(A_ ) / fname).relative_to(A_ ) ) __magic_name__ = short_path.replace(""".py""", """""" ).replace(os.path.sep, """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(A_ ) return submodules __lowerCAmelCase : Dict = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', ] def a__ ( ): '''simple docstring''' __magic_name__ = importlib.util.spec_from_file_location( """transformers""", os.path.join(A_, """__init__.py""" ), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) __magic_name__ = spec.loader.load_module() __magic_name__ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(A_ ) > 0: 
__magic_name__ = """\n""".join(f'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registered in the main init of Transformers:\n""" f'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
76
1
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): __lowerCAmelCase : Optional[int] = 'pt' elif is_tf_available(): __lowerCAmelCase : Union[str, Any] = 'tf' else: __lowerCAmelCase : Tuple = 'jax' class UpperCAmelCase_ ( _A , unittest.TestCase ): '''simple docstring''' a__ = PerceiverTokenizer a__ = False def _lowercase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" super().setUp() __magic_name__ = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _lowercase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" return PerceiverTokenizer.from_pretrained("""deepmind/language-perceiver""" ) def _lowercase ( self : Union[str, Any] , **UpperCamelCase__ : Dict ) -> PerceiverTokenizer: """simple docstring""" return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict=False , UpperCamelCase__ : int=20 , UpperCamelCase__ : Any=5 ) -> Tuple[str, list]: """simple docstring""" __magic_name__ = [] for i in range(len(UpperCamelCase__ ) ): try: __magic_name__ = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ ) except UnicodeDecodeError: pass toks.append((i, tok) ) __magic_name__ = list(filter(lambda UpperCamelCase__ : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , UpperCamelCase__ ) ) __magic_name__ = list(filter(lambda UpperCamelCase__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase__ ) , UpperCamelCase__ ) ) if max_length is not None and len(UpperCamelCase__ ) > max_length: __magic_name__ = toks[:max_length] if min_length is not None and len(UpperCamelCase__ ) < min_length and 
len(UpperCamelCase__ ) > 0: while len(UpperCamelCase__ ) < min_length: __magic_name__ = toks + toks # toks_str = [t[1] for t in toks] __magic_name__ = [t[0] for t in toks] # Ensure consistency __magic_name__ = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ ) if " " not in output_txt and len(UpperCamelCase__ ) > 1: __magic_name__ = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ ) + """ """ + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ ) ) if with_prefix_space: __magic_name__ = """ """ + output_txt __magic_name__ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) return output_txt, output_ids def _lowercase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __magic_name__ = self.perceiver_tokenizer __magic_name__ = """Unicode €.""" __magic_name__ = tokenizer(UpperCamelCase__ ) __magic_name__ = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded["""input_ids"""] , UpperCamelCase__ ) # decoding __magic_name__ = tokenizer.decode(UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , """[CLS]Unicode €.[SEP]""" ) __magic_name__ = tokenizer("""e è é ê ë""" ) __magic_name__ = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded["""input_ids"""] , UpperCamelCase__ ) # decoding __magic_name__ = tokenizer.decode(UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , """[CLS]e è é ê ë[SEP]""" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """[CLS]e è é ê ë[SEP]""" ) def _lowercase ( self : int ) -> List[str]: """simple docstring""" __magic_name__ = self.perceiver_tokenizer __magic_name__ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] # fmt: off __magic_name__ = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 
110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on __magic_name__ = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) if FRAMEWORK != "jax": __magic_name__ = list(batch.input_ids.numpy()[0] ) else: __magic_name__ = list(batch.input_ids.tolist()[0] ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def _lowercase ( self : Optional[Any] ) -> Dict: """simple docstring""" __magic_name__ = self.perceiver_tokenizer __magic_name__ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __magic_name__ = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""" , UpperCamelCase__ ) self.assertIn("""attention_mask""" , UpperCamelCase__ ) self.assertNotIn("""decoder_input_ids""" , UpperCamelCase__ ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase__ ) def _lowercase ( self : Dict ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.perceiver_tokenizer __magic_name__ = [ """Summary of the text.""", """Another summary.""", ] __magic_name__ = tokenizer( text_target=UpperCamelCase__ , max_length=32 , padding="""max_length""" , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def _lowercase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __magic_name__ = self.get_tokenizers() for tokenizer in tokenizers: with 
self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ = tempfile.mkdtemp() __magic_name__ = """ He is very happy, UNwant\u00E9d,running""" __magic_name__ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) tokenizer.save_pretrained(UpperCamelCase__ ) __magic_name__ = tokenizer.__class__.from_pretrained(UpperCamelCase__ ) __magic_name__ = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) shutil.rmtree(UpperCamelCase__ ) __magic_name__ = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ = tempfile.mkdtemp() __magic_name__ = """ He is very happy, UNwant\u00E9d,running""" tokenizer.add_tokens(["""bim""", """bambam"""] ) __magic_name__ = tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) __magic_name__ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) tokenizer.save_pretrained(UpperCamelCase__ ) __magic_name__ = tokenizer.__class__.from_pretrained(UpperCamelCase__ ) __magic_name__ = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __magic_name__ = tokenizer.__class__.from_pretrained(UpperCamelCase__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(UpperCamelCase__ ) def _lowercase ( self : Dict ) -> Optional[int]: """simple docstring""" __magic_name__ = [] if 
self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: __magic_name__ = json.load(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: __magic_name__ = json.load(UpperCamelCase__ ) __magic_name__ = [F'''<extra_id_{i}>''' for i in range(125 )] __magic_name__ = added_tokens_extra_ids + [ """an_additional_special_token""" ] __magic_name__ = added_tokens_extra_ids + [ """an_additional_special_token""" ] with open(os.path.join(UpperCamelCase__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(UpperCamelCase__ , UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(UpperCamelCase__ , UpperCamelCase__ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __magic_name__ = tokenizer_class.from_pretrained( UpperCamelCase__ , ) self.assertIn( """an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __magic_name__ = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=UpperCamelCase__ )] __magic_name__ = tokenizer_class.from_pretrained( UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , ) self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , ) def _lowercase ( self : Dict ) -> str: """simple docstring""" __magic_name__ = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , """�""" ) def _lowercase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" pass def _lowercase ( self : Optional[int] ) -> List[Any]: """simple docstring""" pass def _lowercase ( self : List[str] ) -> Optional[int]: """simple docstring""" pass def _lowercase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" pass def _lowercase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __magic_name__ = self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): __magic_name__ = ["""[CLS]""", """t""", """h""", """i""", """s""", """ """, """i""", """s""", """ 
""", """a""", """ """, """t""", """e""", """s""", """t""", """[SEP]"""] __magic_name__ = tokenizer.convert_tokens_to_string(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
76
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration class for a SEW-D speech model.

    Holds the feature-encoder (conv stack), transformer, SpecAugment, CTC and
    sequence-classification hyper-parameters.  Parameter names were restored
    from the attribute read-sites in the original (obfuscated) body; defaults
    are kept byte-identical.
    """

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # conv_* are normalised to lists so their lengths can be cross-checked.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self) -> int:
        # Total downsampling factor of the conv feature encoder.
        # Original annotation said `-> str`, but reduce over int strides yields an int.
        return functools.reduce(operator.mul, self.conv_stride, 1)
76
1
import math


def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime.

    Trial division over 6k +/- 1 candidates up to sqrt(number).
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7).

    Raises:
        TypeError: if ``nth`` is not an int and cannot be cast to one.
        ValueError: if ``nth`` is not a positive integer.
    """
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        # advance regardless of primality (the original duplicated this in both branches)
        num += 1
    return primes[-1]


if __name__ == "__main__":
    print(f"{solution() = }")
76
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Logistic sigmoid.

    With ``deriv=True``, ``value`` is interpreted as an already-activated
    output and the sigmoid's derivative ``value * (1 - value)`` is returned.
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial input value; also reused as the learning-rate factor below.
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single-weight, single-neuron network toward ``expected``.

    ``expected`` is a value in [0, 100]; the target for the sigmoid output is
    ``expected / 100``.  Returns the final network output scaled back to
    [0, 100].
    """
    # Random starting weight: an odd integer in [1, 199].
    weight = float(2 * (random.randint(1, 100)) - 1)
    # Pre-compute once so number_propagations == 0 is well-defined
    # (the mangled original would hit an unbound local here).
    layer_1 = sigmoid_function(INITIAL_VALUE * weight)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta (gradient through the sigmoid)
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
76
1
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Configuration: parse the initialization arguments from the command line.
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization.
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config overrides: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn"
# are Mistral stability tweaks; the vocab size must match the tokenizer.
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case).
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize a new, untrained model with the config.
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub (optionally pushing it).
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
76
import os
import sys

# Make the local `src` checkout importable before pulling in transformers.
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

# NOTE(review): this list is never read in this file; it follows the torch.hub
# `hubconf.py` convention where a module-level `dependencies` list declares the
# packages required by the entry points below.
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]

# NOTE(review): the obfuscated source named every wrapper `a__` (each shadowing
# the previous) with duplicate `*A_, **A_` parameters (a SyntaxError).  Distinct
# torch.hub-conventional names are restored below; each wrapper's identity is
# grounded in the Auto class it decorates with and delegates to.


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Hub entry point wrapping ``AutoConfig.from_pretrained``."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Hub entry point wrapping ``AutoTokenizer.from_pretrained``."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Hub entry point wrapping ``AutoModel.from_pretrained``."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Hub entry point wrapping ``AutoModelForCausalLM.from_pretrained``."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Hub entry point wrapping ``AutoModelForMaskedLM.from_pretrained``."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Hub entry point wrapping ``AutoModelForSequenceClassification.from_pretrained``."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Hub entry point wrapping ``AutoModelForQuestionAnswering.from_pretrained``."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
76
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class UpperCAmelCase_(PretrainedConfig):
    """LLaMA model configuration.

    Parameter names were restored from the attribute read-sites in the
    original (obfuscated) body; defaults are kept byte-identical.
    """

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility: default KV heads to full multi-head attention
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self) -> None:
        """Validate the ``rope_scaling`` dictionary ({'type': ..., 'factor': ...})."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
76
from typing import Dict

from .base import GenericTensor, Pipeline


class UpperCAmelCase_(Pipeline):
    """Feature-extraction pipeline: tokenize, run the model, and return the
    first model output (hidden states) as nested lists or raw tensors.

    The four hook names below follow the ``Pipeline`` base-class contract
    (the obfuscated source named them all ``_lowercase``, shadowing each
    other, with duplicate parameter names).
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        # Split user kwargs into (preprocess, forward, postprocess) param dicts.
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        # Tokenize in the tensor format of the active framework ("pt"/"tf").
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor (the hidden states / features).
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
76
1
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert every tensor in a saved state dict to fp16.

    Args:
        src_path: path to a saved state dict (e.g. ``pytorch_model.bin``).
        map_location: device passed to ``torch.load``.
        save_path: output path; when ``None`` the source file is overwritten.

    Raises:
        TypeError: if any value in the loaded dict is not a tensor.
    """
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        # store the halved tensor back (the mangled original dropped this write)
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
76
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel __lowerCAmelCase : str = { 'gwf-440k': { 'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-small-190k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-large-580k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt', 'sample_rate': 48000, 'sample_size': 131072, }, 'maestro-uncond-150k': { 'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'unlocked-uncond-250k': { 'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'honk-140k': { 'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, } def a__ ( A_, A_ ): '''simple docstring''' return torch.atana(A_, A_ ) / math.pi * 2 def a__ ( A_ ): '''simple docstring''' __magic_name__ = torch.sin(t * math.pi / 2 ) ** 2 __magic_name__ = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(A_, A_ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' pass class UpperCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : str ) -> Optional[Any]: """simple docstring""" super().__init__() __magic_name__ = DiffusionAttnUnetaD(UpperCamelCase__ , n_attn_layers=4 ) __magic_name__ = deepcopy(self.diffusion ) __magic_name__ = torch.quasirandom.SobolEngine(1 , scramble=UpperCamelCase__ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = MODELS_MAP[model_name]["""url"""] os.system(f'''wget {url} ./''' ) return f'''./{model_name}.ckpt''' __lowerCAmelCase : Optional[int] = { 
'1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', } __lowerCAmelCase : Optional[Any] = { '8': 'resnets.0', '9': 'attentions.0', '10': 'resnets.1', '11': 'attentions.1', '12': 'resnets.2', '13': 'attentions.2', } __lowerCAmelCase : Union[str, Any] = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', '8': 'resnets.3', '9': 'attentions.3', '10': 'resnets.4', '11': 'attentions.4', '12': 'resnets.5', '13': 'attentions.5', } __lowerCAmelCase : int = { '0': 'resnets.0', '1': 'resnets.1', '2': 'resnets.2', '4': 'resnets.0', '5': 'resnets.1', '6': 'resnets.2', } __lowerCAmelCase : List[str] = { 'skip': 'conv_skip', 'main.0': 'conv_1', 'main.1': 'group_norm_1', 'main.3': 'conv_2', 'main.4': 'group_norm_2', } __lowerCAmelCase : int = { 'norm': 'group_norm', 'qkv_proj': ['query', 'key', 'value'], 'out_proj': ['proj_attn'], } def a__ ( A_ ): '''simple docstring''' if name.startswith("""skip""" ): return name.replace("""skip""", RES_CONV_MAP["""skip"""] ) # name has to be of format main.{digit} if not name.startswith("""main.""" ): raise ValueError(f'''ResConvBlock error with {name}''' ) return name.replace(name[:6], RES_CONV_MAP[name[:6]] ) def a__ ( A_ ): '''simple docstring''' for key, value in ATTN_MAP.items(): if name.startswith(A_ ) and not isinstance(A_, A_ ): return name.replace(A_, A_ ) elif name.startswith(A_ ): return [name.replace(A_, A_ ) for v in value] raise ValueError(f'''Attn error with {name}''' ) def a__ ( A_, A_=13 ): '''simple docstring''' __magic_name__ = input_string if string.split(""".""" )[0] == "timestep_embed": return string.replace("""timestep_embed""", """time_proj""" ) __magic_name__ = 0 if string.startswith("""net.3.""" ): depth += 1 __magic_name__ = string[6:] elif string.startswith("""net.""" ): __magic_name__ = string[4:] while string.startswith("""main.7.""" ): depth += 1 __magic_name__ = string[7:] if 
string.startswith("""main.""" ): __magic_name__ = string[5:] # mid block if string[:2].isdigit(): __magic_name__ = string[:2] __magic_name__ = string[2:] else: __magic_name__ = string[0] __magic_name__ = string[1:] if depth == max_depth: __magic_name__ = MID_NUM_TO_LAYER[layer_num] __magic_name__ = """mid_block""" elif depth > 0 and int(A_ ) < 7: __magic_name__ = DOWN_NUM_TO_LAYER[layer_num] __magic_name__ = f'''down_blocks.{depth}''' elif depth > 0 and int(A_ ) > 7: __magic_name__ = UP_NUM_TO_LAYER[layer_num] __magic_name__ = f'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: __magic_name__ = DEPTH_0_TO_LAYER[layer_num] __magic_name__ = f'''up_blocks.{max_depth - 1}''' if int(A_ ) > 3 else """down_blocks.0""" if not string_left.startswith(""".""" ): raise ValueError(f'''Naming error with {input_string} and string_left: {string_left}.''' ) __magic_name__ = string_left[1:] if "resnets" in new_layer: __magic_name__ = convert_resconv_naming(A_ ) elif "attentions" in new_layer: __magic_name__ = convert_attn_naming(A_ ) __magic_name__ = new_string_left if not isinstance(A_, A_ ): __magic_name__ = prefix + """.""" + new_layer + """.""" + string_left else: __magic_name__ = [prefix + """.""" + new_layer + """.""" + s for s in string_left] return new_string def a__ ( A_ ): '''simple docstring''' __magic_name__ = {} for k, v in state_dict.items(): if k.endswith("""kernel""" ): # up- and downsample layers, don't have trainable weights continue __magic_name__ = rename(A_ ) # check if we need to transform from Conv => Linear for attention if isinstance(A_, A_ ): __magic_name__ = transform_conv_attns(A_, A_, A_ ) else: __magic_name__ = v return new_state_dict def a__ ( A_, A_, A_ ): '''simple docstring''' if len(A_ ) == 1: if len(v.shape ) == 3: # weight __magic_name__ = v[:, :, 0] else: # bias __magic_name__ = v else: # qkv matrices __magic_name__ = v.shape[0] __magic_name__ = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: __magic_name__ = v[i * 
single_shape : (i + 1) * single_shape, :, 0] else: __magic_name__ = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def a__ ( A_ ): '''simple docstring''' __magic_name__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) __magic_name__ = args.model_path.split("""/""" )[-1].split(""".""" )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' __magic_name__ = download(A_ ) __magic_name__ = MODELS_MAP[model_name]["""sample_rate"""] __magic_name__ = MODELS_MAP[model_name]["""sample_size"""] __magic_name__ = Object() __magic_name__ = sample_size __magic_name__ = sample_rate __magic_name__ = 0 __magic_name__ = UNetaDModel(sample_size=A_, sample_rate=A_ ) __magic_name__ = diffusers_model.state_dict() __magic_name__ = DiffusionUncond(A_ ) orig_model.load_state_dict(torch.load(args.model_path, map_location=A_ )["""state_dict"""] ) __magic_name__ = orig_model.diffusion_ema.eval() __magic_name__ = orig_model.state_dict() __magic_name__ = rename_orig_weights(A_ ) __magic_name__ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) __magic_name__ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(A_ ) == 0, f'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("""kernel""" ) for k in list(A_ ) ), f'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. 
{value.shape}''' if key == "time_proj.weight": __magic_name__ = value.squeeze() __magic_name__ = value diffusers_model.load_state_dict(A_ ) __magic_name__ = 100 __magic_name__ = 33 __magic_name__ = IPNDMScheduler(num_train_timesteps=A_ ) __magic_name__ = torch.manual_seed(A_ ) __magic_name__ = torch.randn([1, 2, config.sample_size], generator=A_ ).to(A_ ) __magic_name__ = torch.linspace(1, 0, steps + 1, device=A_ )[:-1] __magic_name__ = get_crash_schedule(A_ ) __magic_name__ = DanceDiffusionPipeline(unet=A_, scheduler=A_ ) __magic_name__ = torch.manual_seed(33 ) __magic_name__ = pipe(num_inference_steps=A_, generator=A_ ).audios __magic_name__ = sampling.iplms_sample(A_, A_, A_, {} ) __magic_name__ = generated.clamp(-1, 1 ) __magic_name__ = (generated - audio).abs().sum() __magic_name__ = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("""Diff sum""", A_ ) print("""Diff max""", A_ ) assert diff_max < 1e-3, f'''Diff max: {diff_max} is too much :-/''' print(f'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') __lowerCAmelCase : Union[str, Any] = parser.parse_args() main(args)
76
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_, AssumeRolePolicyDocument=json.dumps(A_, indent=2 ) ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", 
"""logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(A_, indent=2 ), ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'''role {role_name} already exists. Using existing one''' ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def a__ ( ): '''simple docstring''' __magic_name__ = _ask_options( """How do you want to authorize?""", ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """], A_, ) __magic_name__ = None if credentials_configuration == 0: __magic_name__ = _ask_field("""Enter your AWS Profile name: [default] """, default="""default""" ) __magic_name__ = aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __magic_name__ = _ask_field("""AWS Access Key ID: """ ) __magic_name__ = aws_access_key_id __magic_name__ = _ask_field("""AWS Secret Access Key: """ ) __magic_name__ = aws_secret_access_key __magic_name__ = _ask_field("""Enter your AWS Region: [us-east-1]""", default="""us-east-1""" ) __magic_name__ = aws_region __magic_name__ = _ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""", ["""Provide IAM Role name""", """Create new IAM role using credentials"""], A_, ) if role_management == 0: __magic_name__ = _ask_field("""Enter your IAM role name: """ ) else: __magic_name__ = """accelerate_sagemaker_execution_role""" print(f'''Accelerate will create an iam role "{iam_role_name}" 
using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __magic_name__ = _ask_field( """Do you want to use custom Docker image? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_custom_docker_image: __magic_name__ = _ask_field("""Enter your Docker image: """, lambda A_ : str(A_ ).lower() ) __magic_name__ = _ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_inputs_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_metrics_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_options( """What is the distributed mode?""", ["""No distributed training""", """Data parallelism"""], _convert_sagemaker_distributed_mode, ) __magic_name__ = {} __magic_name__ = _ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""", _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_dynamo: __magic_name__ = """dynamo_""" __magic_name__ = _ask_options( """Which dynamo backend would you like to use?""", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) __magic_name__ = _ask_field( """Do you want to customize the defaults sent to torch.compile? 
[yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_custom_options: __magic_name__ = _ask_options( """Which mode do you want to use?""", A_, lambda A_ : TORCH_DYNAMO_MODES[int(A_ )], default="""default""", ) __magic_name__ = _ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = _ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = """Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __magic_name__ = _ask_options( A_, A_, lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __magic_name__ = _ask_field(A_, lambda A_ : str(A_ ).lower(), default="""ml.p3.2xlarge""" ) __magic_name__ = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __magic_name__ = _ask_field( """How many machines do you want use? [1]: """, A_, default=1, ) __magic_name__ = _ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""", ["""no""", """fp16""", """bf16""", """fp8"""], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=A_, use_cpu=A_, dynamo_config=A_, eca_instance_type=A_, profile=A_, region=A_, iam_role_name=A_, mixed_precision=A_, num_machines=A_, sagemaker_inputs_file=A_, sagemaker_metrics_file=A_, )
76
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowerCAmelCase : Tuple = logging.get_logger(__name__)

# Map of known pretrained LiLT checkpoints to their hosted config files.
# NOTE(review): this binding reuses the (obfuscated) name of the logger above
# and clobbers it — the two module-level names were presumably distinct in the
# original source; confirm before relying on either binding.
__lowerCAmelCase : Tuple = {
    'SCUT-DLVCLab/lilt-roberta-en-base': (
        'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
    ),
}


class UpperCAmelCase_ ( PretrainedConfig ):
    """Configuration class for a LiLT model (``model_type = "lilt"``).

    Fixes applied relative to the previous revision:
    - the ``__init__`` signature used one duplicated placeholder name for every
      parameter (a SyntaxError); parameters are restored to the names the body
      already reads;
    - the base class referenced an undefined ``_A``; ``PretrainedConfig`` is
      imported above and is the only plausible base;
    - every hyperparameter was assigned to a throwaway local instead of being
      stored on ``self``; they are now persisted as attributes.

    All constructor arguments are stored verbatim as same-named attributes;
    ``pad_token_id`` and any extra keyword arguments are forwarded to the
    ``PretrainedConfig`` base constructor.
    """

    # model_type identifier used by the Auto* factory classes
    a__ = """lilt"""

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_ad_position_embeddings=1024,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_ad_position_embeddings = max_ad_position_embeddings
76
1
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path

import pytest

import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPTaTokenizer,
    GPTaTokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
    TOKENIZER_MAPPING,
    get_tokenizer_config,
    tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
    DUMMY_DIFF_TOKENIZER_IDENTIFIER,
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tokenizers,
    slow,
)

# Make the repo-local `utils/test_module` helpers importable.
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402

if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class UpperCAmelCase_ ( unittest.TestCase ):
    """Tests for ``AutoTokenizer`` resolution, registration and caching.

    NOTE(review): this file has been run through an identifier obfuscator —
    every test method is named ``_lowercase`` (so later definitions shadow
    earlier ones at class-creation time), locals are collapsed to
    ``__magic_name__`` and many call arguments to the undefined
    ``UpperCamelCase__``. The code is kept byte-identical; the docstrings
    below describe the *apparent* intent of each method, to be confirmed
    against the unobfuscated upstream file.
    """

    def _lowercase ( self : Optional[int] ) -> Dict:
        """Per-test setup: resets a counter-like local (original intent unclear)."""
        __magic_name__ = 0

    @slow
    def _lowercase ( self : List[Any] ) -> List[Any]:
        """Loads every BERT and GPT-2 archive checkpoint through AutoTokenizer."""
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(UpperCamelCase__ ) , 0 )

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , (GPTaTokenizer, GPTaTokenizerFast) )
            self.assertGreater(len(UpperCamelCase__ ) , 0 )

    def _lowercase ( self : str ) -> List[Any]:
        """Resolves a tiny BERT-like checkpoint and checks its vocab size."""
        __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )

    def _lowercase ( self : Optional[int] ) -> Any:
        """Resolves a tiny RoBERTa-like checkpoint and checks its vocab size."""
        __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 20 )

    def _lowercase ( self : Optional[Any] ) -> Any:
        """Loads a tokenizer with an explicitly supplied config object."""
        __magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        # Check that tokenizer_type ≠ model_type
        __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )

    def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
        """Loads tokenizers from bare vocab files via `tokenizer_type` (+ use_fast)."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(UpperCamelCase__ , """vocab.txt""" ) )
            __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ , tokenizer_type="""bert""" , use_fast=UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(UpperCamelCase__ , """vocab.json""" ) )
            shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(UpperCamelCase__ , """merges.txt""" ) )
            __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ , tokenizer_type="""gpt2""" , use_fast=UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )

    @require_tokenizers
    def _lowercase ( self : List[str] ) -> Any:
        """Same as above but without the `use_fast` argument."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(UpperCamelCase__ , """vocab.txt""" ) )
            __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ , tokenizer_type="""bert""" )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(UpperCamelCase__ , """vocab.json""" ) )
            shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(UpperCamelCase__ , """merges.txt""" ) )
            __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ , tokenizer_type="""gpt2""" )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )

    def _lowercase ( self : int ) -> List[str]:
        """An unknown `tokenizer_type` must raise."""
        with pytest.raises(UpperCamelCase__ ):
            AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )

    @require_tokenizers
    def _lowercase ( self : int ) -> Tuple:
        """Checks the Dutch BERT checkpoint's casing flag and max length."""
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            __magic_name__ = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
            self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) )

            if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase__ )
            else:
                self.assertEqual(tokenizer.do_lower_case , UpperCamelCase__ )

            self.assertEqual(tokenizer.model_max_length , 512 )

    @require_tokenizers
    def _lowercase ( self : str ) -> Tuple:
        """A nonexistent repo id must raise a helpful error."""
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                UpperCamelCase__ , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
                __magic_name__ = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )

    def _lowercase ( self : Tuple ) -> int:
        """Every tokenizer class in TOKENIZER_MAPPING must be resolvable by name."""
        __magic_name__ = TOKENIZER_MAPPING.values()
        __magic_name__ = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__ )

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__ )

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(UpperCamelCase__ )

    @require_tokenizers
    def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
        """`use_fast` flag selects between slow and fast tokenizer classes."""
        self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=UpperCamelCase__ ) , UpperCamelCase__ )
        self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , UpperCamelCase__ )

    @require_tokenizers
    def _lowercase ( self : str ) -> Optional[int]:
        """`do_lower_case` override affects tokenization of cased text."""
        __magic_name__ = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=UpperCamelCase__ )
        __magic_name__ = """Hello, world. How are you?"""
        __magic_name__ = tokenizer.tokenize(UpperCamelCase__ )
        self.assertEqual("""[UNK]""" , tokens[0] )

        __magic_name__ = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=UpperCamelCase__ )
        __magic_name__ = tokenizer.tokenize(UpperCamelCase__ )
        self.assertEqual("""[UNK]""" , tokens[0] )

    @require_tokenizers
    def _lowercase ( self : Optional[Any] ) -> Any:
        """A tokenizer.json-only repo loads as PreTrainedTokenizerFast with its config."""
        __magic_name__ = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
        self.assertEqual(type(UpperCamelCase__ ) , UpperCamelCase__ )
        self.assertEqual(tokenizer.model_max_length , 512 )
        self.assertEqual(tokenizer.vocab_size , 3_0000 )
        self.assertEqual(tokenizer.unk_token , """[UNK]""" )
        self.assertEqual(tokenizer.padding_side , """right""" )
        self.assertEqual(tokenizer.truncation_side , """right""" )

    def _lowercase ( self : Dict ) -> Optional[Any]:
        """save_pretrained → from_pretrained round trip preserves class and vocab."""
        __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(UpperCamelCase__ )
            __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ )

        self.assertIsInstance(UpperCamelCase__ , tokenizer.__class__ )
        self.assertEqual(tokenizera.vocab_size , 12 )

    def _lowercase ( self : Dict ) -> Any:
        """CTRL resolves to a slow tokenizer."""
        __magic_name__ = AutoTokenizer.from_pretrained("""ctrl""" )
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )

    def _lowercase ( self : List[str] ) -> List[Any]:
        """Exercises `get_tokenizer_config` on hub repos and local saves."""
        __magic_name__ = get_tokenizer_config("""bert-base-cased""" )
        __magic_name__ = config.pop("""_commit_hash""" , UpperCamelCase__ )
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(UpperCamelCase__ , {"""do_lower_case""": False} )

        # This model does not have a tokenizer_config so we get back an empty dict.
        __magic_name__ = get_tokenizer_config(UpperCamelCase__ )
        self.assertDictEqual(UpperCamelCase__ , {} )

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(UpperCamelCase__ )
            __magic_name__ = get_tokenizer_config(UpperCamelCase__ )

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )

    def _lowercase ( self : int ) -> Dict:
        """Registering a custom (slow) tokenizer class, with cleanup in finally."""
        try:
            AutoConfig.register("""custom""" , UpperCamelCase__ )
            AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(UpperCamelCase__ ):
                AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )

            __magic_name__ = CustomTokenizer.from_pretrained(UpperCamelCase__ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(UpperCamelCase__ )
                __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def _lowercase ( self : Dict ) -> List[str]:
        """Registering slow + fast custom tokenizers (two-step and one-step)."""
        try:
            AutoConfig.register("""custom""" , UpperCamelCase__ )
            # Can register in two steps
            AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
            AutoTokenizer.register(UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(UpperCamelCase__ ):
                AutoTokenizer.register(UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ )

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                __magic_name__ = BertTokenizerFast.from_pretrained(UpperCamelCase__ )
                bert_tokenizer.save_pretrained(UpperCamelCase__ )
                __magic_name__ = CustomTokenizerFast.from_pretrained(UpperCamelCase__ )

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(UpperCamelCase__ )
                __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ , use_fast=UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
        """Dynamic (remote-code) tokenizer loading honours `trust_remote_code`."""
        with self.assertRaises(UpperCamelCase__ ):
            __magic_name__ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(UpperCamelCase__ ):
            __magic_name__ = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ )

        __magic_name__ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ )
        self.assertTrue(tokenizer.special_attribute_present )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(UpperCamelCase__ )
            __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ )
        self.assertTrue(reloaded_tokenizer.special_attribute_present )

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )

            # Test we can also load the slow version
            __magic_name__ = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(UpperCamelCase__ )
                __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertTrue(reloaded_tokenizer.special_attribute_present )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )

    @require_tokenizers
    def _lowercase ( self : Any ) -> str:
        """Local registered classes vs hub remote code, per `trust_remote_code`."""
        # NOTE(review): the two nested classes below reference the undefined
        # names `_A` and `NewTokenizer` — obfuscation damage; originally these
        # were local slow/fast tokenizer subclasses. Confirm against upstream.
        class UpperCAmelCase_ ( _A ):
            '''simple docstring'''
            a__ = False

        class UpperCAmelCase_ ( _A ):
            '''simple docstring'''
            a__ = NewTokenizer
            a__ = False

        try:
            AutoConfig.register("""custom""" , UpperCamelCase__ )
            AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
            AutoTokenizer.register(UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ )
            # If remote code is not set, the default is to use local
            __magic_name__ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertFalse(tokenizer.special_attribute_present )
            __magic_name__ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=UpperCamelCase__ )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertFalse(tokenizer.special_attribute_present )

            # If remote code is disabled, we load the local one.
            __magic_name__ = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertFalse(tokenizer.special_attribute_present )
            __magic_name__ = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertFalse(tokenizer.special_attribute_present )

            # If remote is enabled, we load from the Hub
            __magic_name__ = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertTrue(tokenizer.special_attribute_present )
            __magic_name__ = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertTrue(tokenizer.special_attribute_present )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def _lowercase ( self : Dict ) -> str:
        """Legacy-format dynamic tokenizer repo still loads with remote code."""
        __magic_name__ = AutoTokenizer.from_pretrained(
            """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=UpperCamelCase__ )
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )

            # Test we can also load the slow version
            __magic_name__ = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )

    def _lowercase ( self : Tuple ) -> Optional[int]:
        """An invalid model identifier raises a descriptive error."""
        with self.assertRaisesRegex(
            UpperCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
            __magic_name__ = AutoTokenizer.from_pretrained("""bert-base""" )

    def _lowercase ( self : int ) -> Dict:
        """An invalid git revision raises a descriptive error."""
        with self.assertRaisesRegex(
            UpperCamelCase__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            __magic_name__ = AutoTokenizer.from_pretrained(UpperCamelCase__ , revision="""aaaaaa""" )

    def _lowercase ( self : Optional[Any] ) -> Dict:
        """A second load of a cached tokenizer performs only a HEAD request."""
        __magic_name__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        with RequestCounter() as counter:
            __magic_name__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
76
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class UpperCAmelCase_ :
    """Mixin of serialization round-trip tests for feature extractors.

    Consumers are expected to provide ``feature_extraction_class`` and
    ``feat_extract_dict`` attributes (the placeholder field below holds one
    of them — its original name was lost to obfuscation).
    NOTE(review): all four test methods share the name ``_lowercase``, so at
    class-creation time later definitions shadow earlier ones; locals are
    likewise collapsed to ``__magic_name__`` and several assertions reference
    the undefined ``UpperCamelCase__``. Code kept byte-identical.
    """

    # Placeholder for the concrete feature-extractor class under test.
    a__ = None

    def _lowercase ( self : Optional[int] ) -> str:
        """to_json_string() must reproduce every configured key/value."""
        __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )
        __magic_name__ = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , UpperCamelCase__ )

    def _lowercase ( self : Union[str, Any] ) -> str:
        """to_json_file() then from_json_file() must preserve the config."""
        __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            __magic_name__ = os.path.join(UpperCamelCase__ , """feat_extract.json""" )
            feat_extract_first.to_json_file(UpperCamelCase__ )
            __magic_name__ = self.feature_extraction_class.from_json_file(UpperCamelCase__ )

        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def _lowercase ( self : str ) -> str:
        """save_pretrained() then from_pretrained() must preserve the config."""
        __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            __magic_name__ = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
            check_json_file_has_correct_format(UpperCamelCase__ )
            __magic_name__ = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )

        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def _lowercase ( self : Optional[int] ) -> Tuple:
        """The feature extractor must be constructible with no arguments."""
        __magic_name__ = self.feature_extraction_class()
        self.assertIsNotNone(UpperCamelCase__ )
76
1
import unittest

from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class UpperCAmelCase_ :
    """Builds tiny Nyströmformer configs/inputs and runs per-head checks.

    NOTE(review): this file has been run through an identifier obfuscator —
    every parameter of ``__init__`` shares the single name ``UpperCamelCase__``
    (a duplicate-argument SyntaxError as written), locals are collapsed to
    ``__magic_name__`` and methods all share the name ``_lowercase`` (later
    definitions shadow earlier ones). Code kept byte-identical; docstrings
    describe apparent intent only.
    """

    def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[int]=99 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : str=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : List[Any]=None , ) -> Union[str, Any]:
        """Stores the tiny-model hyperparameters used by the tests."""
        __magic_name__ = parent
        __magic_name__ = batch_size
        __magic_name__ = seq_length
        __magic_name__ = is_training
        __magic_name__ = use_input_mask
        __magic_name__ = use_token_type_ids
        __magic_name__ = use_labels
        __magic_name__ = vocab_size
        __magic_name__ = hidden_size
        __magic_name__ = num_hidden_layers
        __magic_name__ = num_attention_heads
        __magic_name__ = intermediate_size
        __magic_name__ = hidden_act
        __magic_name__ = hidden_dropout_prob
        __magic_name__ = attention_probs_dropout_prob
        __magic_name__ = max_position_embeddings
        __magic_name__ = type_vocab_size
        __magic_name__ = type_sequence_label_size
        __magic_name__ = initializer_range
        __magic_name__ = num_labels
        __magic_name__ = num_choices
        __magic_name__ = scope

    def _lowercase ( self : Any ) -> Any:
        """Builds random ids/masks/labels plus a config for one test run."""
        __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        __magic_name__ = None
        if self.use_input_mask:
            __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )

        __magic_name__ = None
        if self.use_token_type_ids:
            __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        __magic_name__ = None
        __magic_name__ = None
        __magic_name__ = None
        if self.use_labels:
            __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __magic_name__ = ids_tensor([self.batch_size] , self.num_choices )

        __magic_name__ = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _lowercase ( self : Tuple ) -> Any:
        """Returns a NystromformerConfig built from the stored hyperparameters."""
        return NystromformerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )

    def _lowercase ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> Tuple:
        """Checks the bare model's last_hidden_state shape for three call forms."""
        __magic_name__ = NystromformerModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
        __magic_name__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
        __magic_name__ = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ) -> str:
        """Checks the masked-LM head's logits shape."""
        __magic_name__ = NystromformerForMaskedLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _lowercase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Any ) -> Optional[Any]:
        """Checks the QA head's start/end logits shapes."""
        __magic_name__ = NystromformerForQuestionAnswering(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ) -> Optional[int]:
        """Checks the sequence-classification head's logits shape."""
        __magic_name__ = self.num_labels
        __magic_name__ = NystromformerForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _lowercase ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Dict:
        """Checks the token-classification head's logits shape."""
        __magic_name__ = self.num_labels
        __magic_name__ = NystromformerForTokenClassification(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Optional[Any]:
        """Checks the multiple-choice head's logits shape (inputs tiled per choice)."""
        __magic_name__ = self.num_choices
        __magic_name__ = NystromformerForMultipleChoice(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _lowercase ( self : int ) -> List[Any]:
        """Repackages prepare_config_and_inputs() output for the common tests."""
        __magic_name__ = self.prepare_config_and_inputs()
        ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = config_and_inputs
        __magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict


@require_torch
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
    """Common model/pipeline test harness for Nyströmformer.

    NOTE(review): the base classes ``_A , _A`` are undefined here —
    presumably ``ModelTesterMixin`` and ``PipelineTesterMixin`` (imported
    above); the setUp method also references ``NystromformerModelTester``,
    whose class name was obfuscated away. Confirm against upstream.
    """

    a__ = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    a__ = (
        {
            """feature-extraction""": NystromformerModel,
            """fill-mask""": NystromformerForMaskedLM,
            """question-answering""": NystromformerForQuestionAnswering,
            """text-classification""": NystromformerForSequenceClassification,
            """token-classification""": NystromformerForTokenClassification,
            """zero-shot""": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    a__ = False
    a__ = False

    def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
        """Creates the model tester and config tester."""
        __magic_name__ = NystromformerModelTester(self )
        __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )

    def _lowercase ( self : Tuple ) -> Any:
        """Runs the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def _lowercase ( self : Optional[Any] ) -> Any:
        """Bare-model forward pass check."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _lowercase ( self : Optional[Any] ) -> int:
        """Model check across the three position-embedding variants."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __magic_name__ = type
            self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _lowercase ( self : List[Any] ) -> List[Any]:
        """Masked-LM head check."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )

    def _lowercase ( self : Union[str, Any] ) -> str:
        """Multiple-choice head check."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )

    def _lowercase ( self : Dict ) -> List[Any]:
        """Question-answering head check."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )

    def _lowercase ( self : str ) -> int:
        """Sequence-classification head check."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )

    def _lowercase ( self : List[Any] ) -> List[str]:
        """Token-classification head check."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )

    @slow
    def _lowercase ( self : str ) -> Tuple:
        """Loads the first hub checkpoint from the archive list."""
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __magic_name__ = NystromformerModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )


@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow integration tests against the `uw-madison/nystromformer-512` checkpoint."""

    @slow
    def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
        """Compares a no-head forward pass against stored reference activations."""
        __magic_name__ = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" )
        __magic_name__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )

        with torch.no_grad():
            __magic_name__ = model(UpperCamelCase__ )[0]

        __magic_name__ = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , UpperCamelCase__ )

        __magic_name__ = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )

        self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )

    @slow
    def _lowercase ( self : int ) -> str:
        """End-to-end fill-mask check: the model must predict 'capital'."""
        __magic_name__ = """the [MASK] of Belgium is Brussels"""

        __magic_name__ = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" )
        __magic_name__ = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" )

        __magic_name__ = tokenizer(UpperCamelCase__ , return_tensors="""pt""" )

        with torch.no_grad():
            __magic_name__ = model(encoding.input_ids ).logits

        __magic_name__ = token_logits[:, 2, :].argmax(-1 )[0]

        self.assertEqual(tokenizer.decode(UpperCamelCase__ ) , """capital""" )
76
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    """Placeholder object raising an ImportError when `note_seq` is not installed.

    Fixes in this revision: the original declared `metaclass=_A` (undefined name),
    used `*UpperCamelCase__, **UpperCamelCase__` (duplicate parameter name — a
    SyntaxError), and stored the backend list under `a__` instead of the
    `_backends` attribute that `DummyObject` expects.
    NOTE(review): the class name is restored from the diffusers dummy-objects
    convention (the only note_seq-gated public class) — confirm against upstream.
    """

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
76
1
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    """Shared assertions for a single text-file dataset (4 rows, one `text` column)."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    """`keep_in_memory=True` must build in RAM; `False` must not grow Arrow memory."""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    """Explicit `features` must override the default string dtype of the text column."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    """The requested split name is propagated; `None` falls back to `train`."""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    # parenthesized on purpose: the original `a == b if b else "train"` asserted the
    # truthy string "train" (vacuous) whenever `split` was falsy
    assert dataset.split == (split if split else "train")


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    """Both a single path and a list of paths are accepted as input."""
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict built from text files."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    """Same memory contract as the single-dataset test, for a dict input."""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    """A dict keyed by split builds one dataset per key; `None` builds train+test."""
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
76
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than 4 characters; keep shorter words as-is.

    Fixes in this revision: the original tested ``len(<whole sentence>) > 4``
    instead of ``len(word) > 4`` (so short words were wrongly reversed whenever
    the sentence itself was longer than 4 characters), and the function was
    defined under a placeholder name while the ``__main__`` block called
    ``reverse_long_words``.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
76
1
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Fix: the path was previously assigned to a placeholder name, making the
# `sys.path.insert` below raise NameError.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    """pytest hook: register the shared --make-reports option.

    The hook MUST be named `pytest_addoption` for pytest to discover it.
    """
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """pytest hook: emit the extended report when --make-reports is set."""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
76
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for Funnel (slow and fast implementations).

    Fixes in this revision: the base class was the undefined name `_A`
    (the imported `TokenizerTesterMixin` is the intended mixin), the class
    attributes the mixin reads (`tokenizer_class` — grounded by the
    `self.tokenizer_class(...)` call below — etc.) were all named `a__`,
    and `setUp` (grounded by its `super().setUp()` call) plus the `test_*`
    methods had placeholder names that pytest would never collect.
    """

    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        """WordPiece tokenization and id conversion against the tiny vocab above."""
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        """Funnel uses token type 2 for the leading CLS token."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            # exclude the CLS token from the per-sentence length
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
76
1
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True)
class _Item(Generic[KEY, VAL]):
    """Immutable key/value pair stored in one bucket.

    Fix: the decorator previously passed the undefined name `_A` and the two
    fields were placeholders; `item.key`/`item.val` usages below ground them.
    """

    key: KEY
    val: VAL


class _DeletedItem(_Item):
    """Tombstone marking a deleted slot; falsy so probing continues past it."""

    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Open-addressing hash map with linear probing and automatic resizing.

    Fixes in this revision: every private method was named `_lowercase`
    while the call sites used the real names (`_add_item`, `_try_set`,
    `_iterate_buckets`, ...), and all assignment targets had been collapsed
    into one placeholder variable. Names are restored from the call sites.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        # linear probing with wrap-around
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to add the pair at bucket `ind`; False if occupied by another key."""
        stored = self._buckets[ind]
        if not stored:
            # empty slot or tombstone: claim it
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            # same key: overwrite value, length unchanged
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:  # skips empty slots and falsy tombstones
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        """Yield the probe sequence for `key`, visiting each bucket at most once."""
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                # hit a never-used slot: the key cannot be further along the probe chain
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
76
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    """Hash table whose slots hold deques, so colliding values chain together.

    Fixes in this revision: the base class was the undefined name `_A`
    (the imported `HashTable` is the only plausible base), and the lost
    assignment targets / placeholder method names are restored.
    NOTE(review): `_set_value`, `balanced_factor` and `_collision_resolution`
    override `HashTable` methods — confirm the names against the base class.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # lazily create the deque for this slot, then push the new value at the front
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        # NOTE(review): the original's second assignment target was lost; mirroring
        # the slot into `_keys` follows the base-class convention — verify.
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        # average remaining capacity per slot, scaled by the charge factor
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        # keep chaining into this slot until it is full AND no empty slot remains
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
76
1
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    """Deprecated alias for :class:`GLPNImageProcessor`, kept for backward compatibility.

    Fixes in this revision: the original subclassed the undefined name `_A`
    (the imported `GLPNImageProcessor` is the intended base — the deprecation
    message names both classes), used duplicate `*args/**kwargs` parameter
    names (a SyntaxError), and dropped the `FutureWarning` category from
    `warnings.warn`.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
76
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os

from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
    DYNAMO_BACKENDS,
    _ask_field,
    _ask_options,
    _convert_dynamo_backend,
    _convert_mixed_precision,
    _convert_sagemaker_distributed_mode,
    _convert_yes_no_to_bool,
)


if is_boto3_available():
    import boto3  # noqa: F401


def _create_iam_role_for_sagemaker(role_name):
    """Create an IAM role for SageMaker training jobs, attaching a broad execution policy.

    Fix: this function was defined under a placeholder name while the caller
    below invokes `_create_iam_role_for_sagemaker`; local names and digit-mangled
    identifiers (`botoa` -> `boto3`) are restored.
    """
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    """Return the ARN for an existing IAM role name."""
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]


def get_sagemaker_input():
    """Interactively collect SageMaker launch settings and return a `SageMakerConfig`.

    NOTE(review): assignment targets in the original were collapsed into a
    single placeholder; names below are reconstructed from the intact prompt
    strings and from the keyword arguments of the final `SageMakerConfig(...)`
    call — verify against upstream accelerate.
    """
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
76
1
class Graph:  # Public class to implement a graph
    """Count islands of 1-cells in a row x col matrix using 8-directional DFS.

    Fixes in this revision: method names are restored from their own call
    sites (`self.is_safe`, `self.diffs`), the instance attributes from their
    usages (`self.ROW`, `self.COL`, `self.graph`), and the lost assignment
    targets (`visited[i][j] = True`, the `count` accumulator) are rebuilt.
    """

    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        """True if (i, j) is inside the grid, unvisited, and a land cell."""
        return (
            0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        """Depth-first flood fill over the 8 neighbours of (i, j)."""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
76
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    """CLIP-style image processor: optional RGB convert, shortest-edge resize,
    center crop, rescale and normalization with the OpenAI CLIP statistics.

    Fixes in this revision: every signature reused one placeholder parameter
    name (a SyntaxError), the base class was the undefined `_A` (the imported
    `BaseImageProcessor` is the intended base), and method names are restored
    from their call sites in `preprocess` (`self.resize`, `self.center_crop`,
    `self.rescale`, `self.normalize`).
    NOTE(review): the class name is inferred from the OPENAI_CLIP_MEAN/STD
    defaults — confirm against the enclosing model directory.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # shortest-edge resize keeps aspect ratio, hence default_to_square=False
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the full pipeline to one image or a batch; per-call arguments
        override the instance defaults set in ``__init__``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
76
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)

# Lazy-import table: submodule name -> public names defined there.
# NOTE(review): the original assigned every list to a single throwaway variable
# and never populated `_import_structure`, which `_LazyModule` reads at the
# bottom of this file — that would raise NameError on import. Restored to the
# conventional structure implied by the string keys.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

# Each optional backend contributes extra names only when it is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Mirror of `_import_structure` for static type checkers only.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
76
import unittest

from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class NystromformerModelTester:
    """Builds a tiny Nystromformer config plus random inputs and verifies the
    output shapes of each task head.

    NOTE(review): the original (obfuscated) version funnelled every constructor
    argument and every intermediate value into one throwaway variable, so no
    attribute was ever stored on ``self`` and the methods returned undefined
    names. This rewrite restores the structure implied by the call sites in the
    test classes below (``NystromformerModelTester(self)``,
    ``create_and_check_*``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a config plus random ids/masks/labels sized by the tester."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # NOTE(review): the obfuscated source passed an undefined name as
        # `is_decoder`; encoder-only usage here implies False — confirm.
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three calling conventions; only the last result's shape
        # is asserted (matches the original's overwrite behavior).
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand (batch, seq) inputs to (batch, num_choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the obfuscated source collapsed two boolean class flags into
    # one name; these are the conventional ModelTesterMixin switches — confirm.
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
76
1
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class DebugLauncherTester(unittest.TestCase):
    """Smoke tests: `debug_launcher` must be able to run the bundled scripts.

    NOTE(review): in the obfuscated original both methods were named
    `_lowercase`, so the second definition silently shadowed the first and
    neither was discovered by unittest (no `test_` prefix). Renamed so both
    actually run.
    """

    def test_can_run_test_script(self):
        debug_launcher(test_script.main)

    def test_can_run_test_ops(self):
        debug_launcher(test_ops.main)
76
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    """Configuration class for a CvT (Convolutional vision Transformer) model.

    Stores the per-stage hyper-parameters (patching, attention, MLP, dropout)
    of a three-stage CvT. All arguments are persisted as attributes of the
    same name.

    NOTE(review): the obfuscated original discarded every constructor argument
    into one throwaway variable, so no attribute was ever stored; it also
    inherited from an undefined name `_A` — the import above establishes
    `PretrainedConfig` as the base class.
    """

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        # NOTE: list defaults are shared across calls; they are only read here,
        # never mutated, so this matches upstream usage.
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
76
1
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa

if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """Feature for translations with a fixed set of languages per example.

    NOTE(review): the obfuscated original named both dataclasses identically
    (the second shadowed the first) and named all their methods `_lowercase`;
    names here are restored from the `_type` default strings and the bodies.
    """

    # Languages present in every example; sorted when building the pa type.
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        # One string column per language, in sorted order for determinism.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the feature into one string Value per language."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """Feature for translations where each example may cover a different
    (sub)set of languages, with possibly several translations per language."""

    # Optional closed set of allowed languages; normalized in __post_init__.
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        # Deduplicate + sort so membership checks and schemas are stable.
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """Encode {lang: text-or-list-of-texts} into parallel sorted lists."""
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten into two parallel string sequences."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
76
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazy-import table: submodule name -> public names defined there.
# NOTE(review): the original assigned the torch branch to a throwaway variable
# and never populated `_import_structure`, which `_LazyModule` reads below.
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch is only imported on use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
76
1
from __future__ import annotations

from math import gcd


def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """Return a nontrivial divisor of ``num`` using Pollard's rho, or None.

    Args:
        num: value to factor; must be >= 2.
        seed: initial value for both the tortoise and the hare.
        step: additive constant ``C`` in the pseudorandom map ``x**2 + C``.
        attempts: number of (seed, step) retries before giving up.

    Raises:
        ValueError: if ``num`` < 2.

    NOTE(review): the obfuscated original declared duplicate parameter names
    (a SyntaxError) and its ``__main__`` block called ``pollard_rho``, which
    did not exist; both are restored here.
    """
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be cryptographically
    # random, only roughly uniform. Pollard suggested ``f(x) = (x**2 - 1) % num``;
    # to make retries easy we use ``f(x) = (x**2 + C) % num`` where ``C`` (the
    # ``step``) changes between attempts.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # Both sequences eventually enter a cycle whose length ``p``
            # divides ``num``; once there, the positions' difference shares a
            # common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            if divisor == num:
                # The divisor is ``num`` itself and is useless — retry.
                break
            # The divisor is a nontrivial factor of ``num``!
            return divisor

        # This attempt failed: reseed from the hare's position (as in Brent's
        # variant) and bump the step so the random function diverges.
        seed = hare
        step += 1

    # We were unlucky, or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
76
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# NOTE(review): the obfuscated original named all four functions identically
# (each shadowed the previous), used duplicate parameter names (a SyntaxError),
# and its __main__ block called a name that did not exist. Names and weight
# assignment targets below follow the upstream HF s3prl conversion script —
# verify against that script before relying on them.


def convert_classification(base_model_name, hf_config, downstream_dict):
    """Build a sequence-classification model and load the s3prl head weights."""
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Build an audio-frame-classification model and load the s3prl head."""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Build an x-vector model and load the s3prl TDNN/head weights."""
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an s3prl checkpoint into a saved HF model + feature extractor.

    Dispatches on the architecture declared in the config and writes both the
    converted model and its feature extractor to ``model_dump_path``.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    # NOTE(review): flag values were lost in obfuscation; True/False here match
    # the upstream script — confirm.
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
76
1
import math


def solution(n: int = 100) -> int:
    """Project Euler problem 6: difference between the square of the sum and
    the sum of the squares of the first ``n`` natural numbers.

    NOTE(review): the obfuscated original named this ``a__`` while the
    ``__main__`` guard called ``solution()`` (a NameError); it also computed
    the square via ``int(math.pow(...))``, which loses precision for large
    ``n`` — replaced with exact integer ``**``.
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = sum(range(1, n + 1)) ** 2
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
76
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    """Shared assertions for a Dataset read from the 4-line `text_path` fixture."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    # In-memory reads must not grow the Arrow memory pool... and vice versa.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict whose splits each hold the 4-line fixture."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
76
1
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device

# NOTE(review): the original module-level assignment target was lost in extraction; diffusers test
# modules conventionally set this flag — confirm.
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    # No fast tests yet for this pipeline.
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Clean up CPU/GPU memory between heavyweight pipeline tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        """Saving after remove_unused_weights() and reloading must not change the forward pass."""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_text_to_image(self):
        """fp16 inference reproduces the recorded reference slice."""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
76
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging

logger = logging.get_logger(__name__)


class UpperCAmelCase_(BaseImageProcessor):
    """Image processor: shortest-edge resize -> center crop -> rescale -> ImageNet normalize.

    The base class was an undefined name (`_A`) and all five methods shared the name
    `_lowercase`, so only the last definition survived while `preprocess` called
    `self.resize`/`self.center_crop`/`self.rescale`/`self.normalize` — restored here with
    the method names the body itself uses.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Defaults: 256 shortest edge, then a 224x224 center crop.
        size = size if size is not None else {"shortest_edge": 256}
        # NOTE(review): default_to_square value was lost in the source; False matches the
        # shortest_edge convention — confirm.
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals size["shortest_edge"], keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel mean/std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> Dict:
        """Apply the configured pipeline to one image or a batch; per-call args override defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
76
1
def _print_dist(dist, v):
    """Pretty-print the v x v distance matrix; unreachable pairs print as INF."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                # Distances are stored as floats; display truncated to int.
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths over a v x v adjacency matrix (float("inf") = no edge).

    Args:
        graph: v x v matrix of edge weights.
        v: number of vertices.

    Returns:
        (dist, v) where dist[i][j] is the shortest distance from i to j.
    """
    # Work on a copy so the caller's adjacency matrix is untouched.
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # Classic O(v^3) relaxation: allow vertex k as an intermediate hop.
    for k in range(v):
        for i in range(v):
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        # Distance from a vertex to itself is zero.
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
76
import math


def insertion_sort(array, start=0, end=0):
    """Insertion-sort array[start:end] in place and return the array."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements right until the insertion point is found.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):  # Max Heap
    """Sift array[index] down so the subtree rooted at index satisfies the max-heap property."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    """Heap-sort the whole array in place and return it (introsort's depth-limit fallback)."""
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_a(array, first_index, middle_index, last_index):
    """Return the median of the three sampled values — a robust pivot choice."""
    if (array[first_index] > array[middle_index]) != (array[first_index] > array[last_index]):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (array[middle_index] > array[last_index]):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    """Hoare-style partition of array[low:high] around the pivot value; returns the split index."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    """Sort the array in place with introsort and return it.

    Quicksort with a depth limit of 2*ceil(log2(n)) (then heapsort) and an
    insertion-sort finish for partitions of <= 16 elements.
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.loga(len(array))) if False else 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    """Recursive introsort worker over array[start:end]."""
    while end - start > size_threshold:
        if max_depth == 0:
            # Quicksort recursion got too deep — fall back to guaranteed O(n log n).
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_a(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
76
1
def solution() -> int:
    """Project Euler problem 40: Champernowne's constant.

    Concatenate the positive integers ("123456789101112...") until at least
    10**6 digits exist, then multiply the digits at positions 1, 10, 100, ...,
    10**6 (1-indexed). The original stopped after 10**6 *numbers* — ~6.9M digits —
    doing several times the necessary work; here we stop once 10**6 digits exist.

    Returns:
        d1 * d10 * d100 * d1000 * d10000 * d100000 * d1000000 = 210
    """
    digits = []
    length = 0
    i = 1
    while length < 1_000_000:
        s = str(i)
        digits.append(s)
        length += len(s)
        i += 1
    constant = "".join(digits)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
76
import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetVaConfig,
    MobileNetVaForImageClassification,
    MobileNetVaImageProcessor,
    load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_va_config(model_name):
    """Build a MobileNetV1 config from a name like "mobilenet_v1_1.0_224"."""
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        # NOTE(review): assignment targets reconstructed — depth multiplier and input size
        # parsed from the model name; confirm attribute names against MobileNetV1Config.
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    """Download the standard COCO cats test image used to verify logits."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a TF MobileNetV1 checkpoint to Hugging Face format, sanity-check the logits, and save."""
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
76
1
def solution(min_total: int = 10**12) -> int:
    """Project Euler problem 100: arranged probability.

    Find the number of blue discs in the first arrangement with more than
    `min_total` discs where P(two random blue discs) is exactly 1/2, i.e.
    2*b*(b-1) = n*(n-1). Solutions follow a Pell-like recurrence on the odd
    transforms x = 2b-1, y = 2n-1, iterated here until the total exceeds the bound.

    Args:
        min_total: lower bound (exclusive) on the total number of discs.

    Returns:
        The number of blue discs in the first qualifying arrangement.
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    # numerator tracks 2*b - 1, denominator tracks 2*n - 1; stop once n > min_total.
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
76
import collections
import importlib.util
import os
import re
from pathlib import Path

PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile('^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the backend name tested by an `if not is_xxx_available():` line, else None."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Parse an __init__.py and return (import_dict_objects, type_hint_objects).

    Both are dicts mapping a backend name ("none" for backend-free) to the list of
    public object names declared, respectively, in `_import_structure` and in the
    `TYPE_CHECKING` section. Returns None for inits without an `_import_structure`.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall("\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of human-readable error strings."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Walk the transformers source tree and raise if any __init__.py halves disagree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the dotted names of all transformers submodules (folders with .py files + top-level modules)."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    """Raise if some submodule is missing from the main init's `_import_structure` keys."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
76
1
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


# NOTE(review): identifier mangling had collapsed the base class to `_A`, all
# fields to `a__`, and both methods to `_lowercase`; the names below are
# restored so the methods' references (`self.label_column`, `ClassLabel`,
# `task_template`) resolve again.
@dataclass(frozen=True)
class UpperCAmelCase_(TaskTemplate):
    """Task template describing the canonical text-classification layout.

    Maps a dataset's own columns onto the canonical ``text`` / ``labels``
    columns that text-classification consumers expect.
    """

    # Serialized even when it still holds the default so round-tripped
    # dataset infos keep an explicit task name.
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features: Features) -> "UpperCAmelCase_":
        """Return a copy of this template whose label schema uses the dataset's own ClassLabel.

        Args:
            features: the dataset's declared features.

        Raises:
            ValueError: if ``label_column`` is absent from ``features`` or is
                not a ``ClassLabel`` feature.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        # The dataclass is frozen, so the updated schema is written through
        # __dict__ on a deep copy rather than by normal attribute assignment.
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Mapping from this template's source columns to the canonical names."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
76
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): the mangled source bound both module globals to
# `__lowerCAmelCase`, so the logger was immediately clobbered by this map;
# distinct names are restored.
SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration for a SEW-D model.

    Stores hyper-parameters for the convolutional feature extractor, the
    DeBERTa-style encoder, SpecAugment masking, the CTC loss, and the optional
    sequence-classification head.

    NOTE(review): the original had every parameter collapsed to the duplicate
    name `UpperCamelCase__` (a SyntaxError) and every `self.x = x` collapsed to
    `__magic_name__ = x`; names are reconstructed from the defaults and the
    attribute reads (`self.conv_dim`, `self.conv_stride`, ...) visible below.
    """

    # identifier used by the auto-config machinery (was mangled to `a__`)
    model_type = """sew-d"""

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # encoder / feature-extractor hyper-parameters
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # the three conv descriptions must agree layer-for-layer
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def _lowercase(self):
        """Ratio of raw input length to encoder output length (product of the conv strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
76
1
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


# NOTE(review): the mangled source named BOTH classes `UpperCAmelCase_` (the
# second shadowed the first) and named all of the writer's methods
# `_lowercase` while the bodies call `self._write` / `self._batch_json`;
# distinct, call-site-consistent names are restored.
class JsonDatasetReader(AbstractDatasetReader):
    """Reader that builds a `Dataset` (or streaming dataset) from JSON/JSON-Lines files."""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # optional top-level JSON field that holds the actual records
        self.field = field
        # normalize a bare path / list of paths to a {split: paths} mapping
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Materialize (or stream) the dataset for the configured split."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    """Serializes a `Dataset` to JSON-Lines (or pandas-style JSON), optionally in parallel."""

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        """Write the dataset and return the number of bytes written."""
        # `path_or_buf` is fixed at construction time; drop any duplicate kwarg
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        # JSON-Lines by default when orient == "records"
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args) -> bytes:
        """Encode one batch of rows as JSON bytes (picklable single-arg form for Pool.imap)."""
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        """Stream encoded batches into ``file_obj``, serially or via a process pool."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
76
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Logistic sigmoid.

    With ``deriv=True``, ``value`` is interpreted as an activation and the
    derivative ``value * (1 - value)`` is returned instead.
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value: scale used both for the forward pass and the weight update
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight so the sigmoid output approaches ``expected / 100``.

    Returns the final activation rescaled to the [0, 100] range.
    """
    # random starting weight: an odd integer in [-99, 99]
    weight = float(2 * (random.randint(1, 100)) - 1)
    # pre-compute so the function is well-defined even for 0 propagations
    layer_1 = sigmoid_function(INITIAL_VALUE * weight)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta, scaled through the sigmoid derivative
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
76
1
from __future__ import annotations


def ceil_index(v, left, right, key):
    """Binary search: smallest index in ``(left, right]`` with ``v[index] >= key``.

    ``v`` must be sorted ascending over that range; ``left`` may be -1 as an
    open lower sentinel.
    """
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Length of the longest strictly increasing subsequence, in O(n log n).

    ``tail[k]`` holds the smallest possible tail value of an increasing
    subsequence of length ``k + 1``.

    >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> longest_increasing_subsequence_length([])
    0
    >>> longest_increasing_subsequence_length([5, 4, 3, 2, 1])
    1
    """
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value: restart the length-1 candidate
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # extends the longest subsequence seen so far
            tail[length] = v[i]
            length += 1
        else:
            # replace the ceiling of v[i] to keep tails as small as possible
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
import os
import sys


# NOTE(review): mangling had collapsed this global, yet the very next line
# references `SRC_DIR`; the binding is restored.
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


# torch.hub dependency declaration: packages that must be installed before
# this repository can be loaded through `torch.hub.load`.
dependencies = [
    'torch',
    'numpy',
    'tokenizers',
    'filelock',
    'requests',
    'tqdm',
    'regex',
    'sentencepiece',
    'sacremoses',
    'importlib_metadata',
    'huggingface_hub',
]


# NOTE(review): all seven entry points below were mangled to the same name
# `a__`, leaving only the last one reachable; distinct names are restored.
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """hub entry point: delegate to `AutoConfig.from_pretrained`."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """hub entry point: delegate to `AutoTokenizer.from_pretrained`."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """hub entry point: delegate to `AutoModel.from_pretrained`."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """hub entry point: delegate to `AutoModelForCausalLM.from_pretrained`."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """hub entry point: delegate to `AutoModelForMaskedLM.from_pretrained`."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """hub entry point: delegate to `AutoModelForSequenceClassification.from_pretrained`."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """hub entry point: delegate to `AutoModelForQuestionAnswering.from_pretrained`."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
76
1
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class UpperCAmelCase_:
    """Mixin of serialization round-trip tests shared by feature-extractor test classes.

    Concrete subclasses must also inherit from ``unittest.TestCase`` (the
    methods use ``assertEqual``) and provide ``feature_extraction_class`` and
    ``feat_extract_dict``.

    NOTE(review): mangling had named all four methods `_lowercase` (so only
    the last survived and none were discovered as tests) and collapsed the
    locals; names consistent with the bodies' own references are restored.
    """

    # set by the concrete test class to the feature-extractor under test
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
76
from typing import Dict

from .base import GenericTensor, Pipeline


class UpperCAmelCase_(Pipeline):
    """Feature-extraction pipeline: tokenize, run the model, return the hidden states.

    NOTE(review): mangling had named the four framework hooks `_lowercase`
    (only the last survived, and the `Pipeline` machinery calls
    `_sanitize_parameters` / `preprocess` / `_forward` / `postprocess` by
    name); the hook names are restored.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        """Split call-time kwargs into (preprocess, forward, postprocess) parameter dicts."""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        """Tokenize the raw input into framework-native tensors."""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model on the tokenized inputs."""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        """Return the first model output, as a tensor or as nested Python lists."""
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        """Extract features for one text or a batch of texts."""
        return super().__call__(*args, **kwargs)
76
1
# NOTE(review): this whole script is a mangled copy of the Hugging Face
# `run_wav2vec2_pretraining` example. Identifier obfuscation collapsed
# distinct names (`__magic_name__`, `a__`, `A_`, `UpperCamelCase__`, `_A`),
# so several definitions are self-inconsistent; the comments below flag the
# breakages without altering any code token.
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices


if is_apex_available():
    from apex import amp

# Native AMP is only importable from torch >= 1.6.
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    __lowerCAmelCase : int = True  # NOTE(review): presumably the `_is_native_amp_available` flag — confirm
    from torch.cuda.amp import autocast

# NOTE(review): rebinds the same mangled global as the AMP flag above; later
# code calls `logger.setLevel`, so this was presumably named `logger`.
__lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)


# Model arguments dataclass (paths, freezing, gumbel-temperature schedule).
@dataclass
class UpperCAmelCase_ :
    '''simple docstring'''

    # NOTE(review): every field below was collapsed to the same name `a__`, so
    # only the last assignment survives; `_A` defaults are unresolved
    # placeholders. `main` reads `model_name_or_path`, `cache_dir`,
    # `verbose_logging`, `max_gumbel_temperature`, `min_gumbel_temperature`
    # and `gumbel_temperature_decay` from this class.
    a__ = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    a__ = field(
        default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    a__ = field(
        default=_A , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
    a__ = field(
        default=_A , metadata={"""help""": """Whether to log verbose messages or not."""} , )
    a__ = field(
        default=2.0 , metadata={"""help""": """Maximum temperature for gumbel softmax."""} )
    a__ = field(
        default=0.5 , metadata={"""help""": """Minimum temperature for gumbel softmax."""} )
    a__ = field(
        default=0.99_99_95 , metadata={"""help""": """Decay of gumbel temperature during training."""} )


# Configure root logging and pick a level from the parsed argument dataclasses.
# NOTE(review): duplicate parameter name `A_` is a SyntaxError; the body
# references `model_args` / `training_args`, the original parameter names.
def a__ ( A_, A_ ):
    '''simple docstring'''
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,
        datefmt="""%m/%d/%Y %H:%M:%S""" ,
        handlers=[logging.StreamHandler(sys.stdout )] , )
    # NOTE(review): the chosen level was collapsed to `__magic_name__`, so
    # `logger.setLevel(A_)` below no longer receives it.
    __magic_name__ = logging.WARNING
    if model_args.verbose_logging:
        __magic_name__ = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        __magic_name__ = logging.INFO
    logger.setLevel(A_ )


# Data arguments dataclass (dataset names, splits, preprocessing options).
@dataclass
class UpperCAmelCase_ :
    '''simple docstring'''

    # NOTE(review): same `a__` field-name collapse as above; `main` reads
    # `dataset_name`, `dataset_config_name`, `train_split_name`,
    # `speech_file_column`, `overwrite_cache`, `validation_split_percentage`,
    # `preprocessing_num_workers` and `max_duration_in_seconds`.
    a__ = field(
        default=_A , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
    a__ = field(
        default=_A , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
    a__ = field(
        default="""train""" , metadata={
            """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
        } , )
    a__ = field(
        default="""validation""" , metadata={
            """help""": (
                """The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"""
            )
        } , )
    a__ = field(
        default="""file""" , metadata={"""help""": """Column in the dataset that contains speech file path. Defaults to 'file'"""} , )
    a__ = field(
        default=_A , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    a__ = field(
        default=1 , metadata={
            """help""": """The percentage of the train set used as validation set in case there's no validation split"""
        } , )
    a__ = field(
        default=_A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
    a__ = field(
        default=20.0 , metadata={"""help""": """Filter audio files that are longer than `max_duration_in_seconds` seconds"""} )


# Data collator: pads a batch and samples the SpecAugment time-mask indices.
@dataclass
class UpperCAmelCase_ :
    '''simple docstring'''

    # NOTE(review): fields collapsed to `a__`; `__call__` reads
    # `self.feature_extractor`, `self.model`, `self.max_length`,
    # `self.padding` and `self.pad_to_multiple_of`. The `42` values are
    # placeholders where type annotations (model / feature-extractor) stood.
    a__ = 42
    a__ = 42
    a__ = "longest"
    a__ = None
    a__ = None

    def __call__( self : int , UpperCamelCase__ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
        """simple docstring"""
        # pad to the longest sample in the batch (or `max_length`)
        __magic_name__ = self.feature_extractor.pad(
            UpperCamelCase__ ,
            max_length=self.max_length ,
            padding=self.padding ,
            pad_to_multiple_of=self.pad_to_multiple_of ,
            return_tensors="""pt""" , )
        # sequence length after the conv feature extractor
        __magic_name__ = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] )
        __magic_name__ = batch["""input_values"""].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            __magic_name__ = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to(
                torch.long )
            __magic_name__ = torch.zeros(
                (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["""input_values"""].device )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            # NOTE(review): the indexed left-hand side of this assignment was
            # destroyed by the mangling (originally an advanced-index write
            # into the reduced attention mask).
            __magic_name__ = 1
            __magic_name__ = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        __magic_name__ = _compute_mask_indices(
            (batch_size, mask_indices_seq_length) ,
            self.model.config.mask_time_prob ,
            self.model.config.mask_time_length ,
            attention_mask=UpperCamelCase__ ,
            min_masks=2 , )
        return batch


# Trainer subclass that decays the gumbel-softmax temperature every step.
# NOTE(review): base class `_A` is an unresolved placeholder (presumably
# `Trainer`, which is imported above).
class UpperCAmelCase_ ( _A ):
    '''simple docstring'''

    def __init__( self : int , *UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : Dict=0 , UpperCamelCase__ : Union[str, Any]=1.0 , **UpperCamelCase__ : Optional[int] ) -> List[Any]:
        """simple docstring"""
        # NOTE(review): keyword parameters were all collapsed to
        # `UpperCamelCase__` (duplicate names), yet the body reads
        # `max_gumbel_temp`, `min_gumbel_temp` and `gumbel_temp_decay`.
        super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
        __magic_name__ = 0
        __magic_name__ = max_gumbel_temp
        __magic_name__ = min_gumbel_temp
        __magic_name__ = gumbel_temp_decay

    def _lowercase ( self : Any , UpperCamelCase__ : nn.Module , UpperCamelCase__ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
        """simple docstring"""
        # NOTE(review): presumably the `training_step` override — the Trainer
        # framework would call it by that name; locals (`inputs`, `loss`)
        # were collapsed to `__magic_name__` while their uses keep real names.
        model.train()
        __magic_name__ = self._prepare_inputs(UpperCamelCase__ )

        if self.use_amp:
            with autocast():
                __magic_name__ = self.compute_loss(UpperCamelCase__ , UpperCamelCase__ )
        else:
            __magic_name__ = self.compute_loss(UpperCamelCase__ , UpperCamelCase__ )

        if self.args.n_gpu > 1 or self.deepspeed:
            # under DataParallel / deepspeed the config lives on `model.module`
            if model.module.config.ctc_loss_reduction == "mean":
                __magic_name__ = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                __magic_name__ = loss.sum() / (inputs["""mask_time_indices"""]).sum()
            else:
                raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )

        if self.args.gradient_accumulation_steps > 1:
            __magic_name__ = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(UpperCamelCase__ ).backward()
        elif self.use_apex:
            with amp.scale_loss(UpperCamelCase__ , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(UpperCamelCase__ )
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )

        return loss.detach()


# Entry point: parse args, load + preprocess the dataset, build and run the trainer.
# NOTE(review): the `__main__` guard calls `main()` and the body calls
# `configure_logger`, `DataCollatorForWavaVecaPretraining` and
# `WavaVecaPreTrainer` — the original names of the mangled `a__` /
# `UpperCAmelCase_` definitions above.
def a__ ( ):
    '''simple docstring'''
    __magic_name__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    __magic_name__ , __magic_name__ , __magic_name__ = parser.parse_args_into_dataclasses()
    configure_logger(A_, A_ )

    # Downloading and loading a dataset from the hub.
    __magic_name__ = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir )

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        __magic_name__ = DatasetDict()
        __magic_name__ = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''',
            cache_dir=model_args.cache_dir, )
        __magic_name__ = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''',
            cache_dir=model_args.cache_dir, )
    else:
        # make sure only "validation" and "train" keys remain"
        __magic_name__ = DatasetDict()
        __magic_name__ = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="""validation""",
            cache_dir=model_args.cache_dir, )
        __magic_name__ = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f'''{data_args.train_split_name}''',
            cache_dir=model_args.cache_dir, )

    # only normalized-inputs-training is supported
    __magic_name__ = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=A_ )

    def prepare_dataset(A_ ):
        # check that all files have the correct sampling rate
        __magic_name__ , __magic_name__ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate )
        return batch

    # load audio files into numpy arrays
    __magic_name__ = datasets.map(
        A_, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["""train"""].column_names )

    # filter audio files that are too long
    __magic_name__ = vectorized_datasets.filter(
        lambda A_ : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )

    def normalize(A_ ):
        return feature_extractor(batch["""speech"""], sampling_rate=feature_extractor.sampling_rate )

    # normalize and transform to `BatchFeatures`
    __magic_name__ = vectorized_datasets.map(
        A_,
        batched=A_,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["""train"""].column_names, )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    __magic_name__ = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing, )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            """PreTraining is only supported for ``config.do_stable_layer_norm=True`` and""" """ ``config.feat_extract_norm='layer'""" )

    __magic_name__ = WavaVecaForPreTraining(A_ )

    __magic_name__ = DataCollatorForWavaVecaPretraining(model=A_, feature_extractor=A_ )

    __magic_name__ = WavaVecaPreTrainer(
        model=A_,
        data_collator=A_,
        args=A_,
        train_dataset=vectorized_datasets["""train"""],
        eval_dataset=vectorized_datasets["""validation"""],
        tokenizer=A_,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay, )
    trainer.train()


if __name__ == "__main__":
    main()
76
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel __lowerCAmelCase : str = { 'gwf-440k': { 'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-small-190k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-large-580k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt', 'sample_rate': 48000, 'sample_size': 131072, }, 'maestro-uncond-150k': { 'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'unlocked-uncond-250k': { 'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'honk-140k': { 'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, } def a__ ( A_, A_ ): '''simple docstring''' return torch.atana(A_, A_ ) / math.pi * 2 def a__ ( A_ ): '''simple docstring''' __magic_name__ = torch.sin(t * math.pi / 2 ) ** 2 __magic_name__ = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(A_, A_ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' pass class UpperCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : str ) -> Optional[Any]: """simple docstring""" super().__init__() __magic_name__ = DiffusionAttnUnetaD(UpperCamelCase__ , n_attn_layers=4 ) __magic_name__ = deepcopy(self.diffusion ) __magic_name__ = torch.quasirandom.SobolEngine(1 , scramble=UpperCamelCase__ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = MODELS_MAP[model_name]["""url"""] os.system(f'''wget {url} ./''' ) return f'''./{model_name}.ckpt''' __lowerCAmelCase : Optional[int] = { 
'1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', } __lowerCAmelCase : Optional[Any] = { '8': 'resnets.0', '9': 'attentions.0', '10': 'resnets.1', '11': 'attentions.1', '12': 'resnets.2', '13': 'attentions.2', } __lowerCAmelCase : Union[str, Any] = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', '8': 'resnets.3', '9': 'attentions.3', '10': 'resnets.4', '11': 'attentions.4', '12': 'resnets.5', '13': 'attentions.5', } __lowerCAmelCase : int = { '0': 'resnets.0', '1': 'resnets.1', '2': 'resnets.2', '4': 'resnets.0', '5': 'resnets.1', '6': 'resnets.2', } __lowerCAmelCase : List[str] = { 'skip': 'conv_skip', 'main.0': 'conv_1', 'main.1': 'group_norm_1', 'main.3': 'conv_2', 'main.4': 'group_norm_2', } __lowerCAmelCase : int = { 'norm': 'group_norm', 'qkv_proj': ['query', 'key', 'value'], 'out_proj': ['proj_attn'], } def a__ ( A_ ): '''simple docstring''' if name.startswith("""skip""" ): return name.replace("""skip""", RES_CONV_MAP["""skip"""] ) # name has to be of format main.{digit} if not name.startswith("""main.""" ): raise ValueError(f'''ResConvBlock error with {name}''' ) return name.replace(name[:6], RES_CONV_MAP[name[:6]] ) def a__ ( A_ ): '''simple docstring''' for key, value in ATTN_MAP.items(): if name.startswith(A_ ) and not isinstance(A_, A_ ): return name.replace(A_, A_ ) elif name.startswith(A_ ): return [name.replace(A_, A_ ) for v in value] raise ValueError(f'''Attn error with {name}''' ) def a__ ( A_, A_=13 ): '''simple docstring''' __magic_name__ = input_string if string.split(""".""" )[0] == "timestep_embed": return string.replace("""timestep_embed""", """time_proj""" ) __magic_name__ = 0 if string.startswith("""net.3.""" ): depth += 1 __magic_name__ = string[6:] elif string.startswith("""net.""" ): __magic_name__ = string[4:] while string.startswith("""main.7.""" ): depth += 1 __magic_name__ = string[7:] if 
string.startswith("""main.""" ): __magic_name__ = string[5:] # mid block if string[:2].isdigit(): __magic_name__ = string[:2] __magic_name__ = string[2:] else: __magic_name__ = string[0] __magic_name__ = string[1:] if depth == max_depth: __magic_name__ = MID_NUM_TO_LAYER[layer_num] __magic_name__ = """mid_block""" elif depth > 0 and int(A_ ) < 7: __magic_name__ = DOWN_NUM_TO_LAYER[layer_num] __magic_name__ = f'''down_blocks.{depth}''' elif depth > 0 and int(A_ ) > 7: __magic_name__ = UP_NUM_TO_LAYER[layer_num] __magic_name__ = f'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: __magic_name__ = DEPTH_0_TO_LAYER[layer_num] __magic_name__ = f'''up_blocks.{max_depth - 1}''' if int(A_ ) > 3 else """down_blocks.0""" if not string_left.startswith(""".""" ): raise ValueError(f'''Naming error with {input_string} and string_left: {string_left}.''' ) __magic_name__ = string_left[1:] if "resnets" in new_layer: __magic_name__ = convert_resconv_naming(A_ ) elif "attentions" in new_layer: __magic_name__ = convert_attn_naming(A_ ) __magic_name__ = new_string_left if not isinstance(A_, A_ ): __magic_name__ = prefix + """.""" + new_layer + """.""" + string_left else: __magic_name__ = [prefix + """.""" + new_layer + """.""" + s for s in string_left] return new_string def a__ ( A_ ): '''simple docstring''' __magic_name__ = {} for k, v in state_dict.items(): if k.endswith("""kernel""" ): # up- and downsample layers, don't have trainable weights continue __magic_name__ = rename(A_ ) # check if we need to transform from Conv => Linear for attention if isinstance(A_, A_ ): __magic_name__ = transform_conv_attns(A_, A_, A_ ) else: __magic_name__ = v return new_state_dict def a__ ( A_, A_, A_ ): '''simple docstring''' if len(A_ ) == 1: if len(v.shape ) == 3: # weight __magic_name__ = v[:, :, 0] else: # bias __magic_name__ = v else: # qkv matrices __magic_name__ = v.shape[0] __magic_name__ = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: __magic_name__ = v[i * 
single_shape : (i + 1) * single_shape, :, 0] else: __magic_name__ = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def a__ ( A_ ): '''simple docstring''' __magic_name__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) __magic_name__ = args.model_path.split("""/""" )[-1].split(""".""" )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' __magic_name__ = download(A_ ) __magic_name__ = MODELS_MAP[model_name]["""sample_rate"""] __magic_name__ = MODELS_MAP[model_name]["""sample_size"""] __magic_name__ = Object() __magic_name__ = sample_size __magic_name__ = sample_rate __magic_name__ = 0 __magic_name__ = UNetaDModel(sample_size=A_, sample_rate=A_ ) __magic_name__ = diffusers_model.state_dict() __magic_name__ = DiffusionUncond(A_ ) orig_model.load_state_dict(torch.load(args.model_path, map_location=A_ )["""state_dict"""] ) __magic_name__ = orig_model.diffusion_ema.eval() __magic_name__ = orig_model.state_dict() __magic_name__ = rename_orig_weights(A_ ) __magic_name__ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) __magic_name__ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(A_ ) == 0, f'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("""kernel""" ) for k in list(A_ ) ), f'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. 
{value.shape}''' if key == "time_proj.weight": __magic_name__ = value.squeeze() __magic_name__ = value diffusers_model.load_state_dict(A_ ) __magic_name__ = 100 __magic_name__ = 33 __magic_name__ = IPNDMScheduler(num_train_timesteps=A_ ) __magic_name__ = torch.manual_seed(A_ ) __magic_name__ = torch.randn([1, 2, config.sample_size], generator=A_ ).to(A_ ) __magic_name__ = torch.linspace(1, 0, steps + 1, device=A_ )[:-1] __magic_name__ = get_crash_schedule(A_ ) __magic_name__ = DanceDiffusionPipeline(unet=A_, scheduler=A_ ) __magic_name__ = torch.manual_seed(33 ) __magic_name__ = pipe(num_inference_steps=A_, generator=A_ ).audios __magic_name__ = sampling.iplms_sample(A_, A_, A_, {} ) __magic_name__ = generated.clamp(-1, 1 ) __magic_name__ = (generated - audio).abs().sum() __magic_name__ = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("""Diff sum""", A_ ) print("""Diff max""", A_ ) assert diff_max < 1e-3, f'''Diff max: {diff_max} is too much :-/''' print(f'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') __lowerCAmelCase : Union[str, Any] = parser.parse_args() main(args)
76
1
def a__(word, max_width):
    """Fully justify the words of ``word`` into lines of exactly ``max_width`` chars.

    Spaces are distributed as evenly as possible between the words of each
    line, with any surplus given to the leftmost gaps (round robin).  The
    last line is left-justified and padded on the right.

    Fix: the original signature declared two parameters both named ``A_``
    (a SyntaxError); the body references ``word`` and ``max_width``, so
    those are restored as the parameter names.

    >>> a__("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line, width, max_width) -> str:
        # Total padding that must appear somewhere on this line.
        overall_spaces_count = max_width - width
        words_count = len(line)
        if words_count == 1:
            # Single word: all padding goes after it.
            return line[0] + " " * overall_spaces_count
        spaces_to_insert_between_words = words_count - 1
        # Base spaces for every gap ...
        num_spaces_between_words_list = spaces_to_insert_between_words * [
            overall_spaces_count // spaces_to_insert_between_words
        ]
        spaces_count_in_locations = (
            overall_spaces_count % spaces_to_insert_between_words
        )
        # ... plus one extra space for the leftmost gaps until the surplus runs out.
        for i in range(spaces_count_in_locations):
            num_spaces_between_words_list[i] += 1
        aligned_words_list = []
        for i in range(spaces_to_insert_between_words):
            aligned_words_list.append(line[i])
            aligned_words_list.append(num_spaces_between_words_list[i] * " ")
        aligned_words_list.append(line[-1])
        return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for current_word in words:
        # len(line) accounts for the single mandatory space before each word.
        if width + len(current_word) + len(line) <= max_width:
            line.append(current_word)
            width += len(current_word)
        else:
            # Line is full: justify it and start a new one with the current word.
            answer.append(justify(line, width, max_width))
            line, width = [current_word], len(current_word)
    # Last line: left-justified, right-padded to exactly max_width.
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
76
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowerCAmelCase : Tuple = logging.get_logger(__name__)

# NOTE(review): this rebinds the same obfuscated module-level name as the logger
# above (the logger is not referenced afterwards, so behavior is unchanged).
__lowerCAmelCase : Tuple = {
    'SCUT-DLVCLab/lilt-roberta-en-base': (
        'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
    ),
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration class for a LiLT model.

    Fixes relative to the corrupted original: the base class ``_A`` was
    undefined (``PretrainedConfig`` is what this file imports for exactly this
    purpose), the ``__init__`` parameters all shared one name (a SyntaxError),
    and the ``__magic_name__ = x`` statements are restored to ``self.x = x``
    instance-attribute assignments.
    """

    # model_type identifier (kept under the file's obfuscated attribute name)
    a__ = """lilt"""

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_ad_position_embeddings=1024,
        **kwargs,
    ):
        """Store all hyper-parameters; ``pad_token_id`` is forwarded to the base class."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_ad_position_embeddings = max_ad_position_embeddings
76
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class UpperCAmelCase_(PipelineTool):
    """Tool that transcribes an audio sample into text with Whisper.

    Fixes relative to the corrupted original: the base class ``_A`` was
    undefined (``PipelineTool`` is imported above for this purpose), the class
    attributes were all named ``a__`` (so only the last survived) and the three
    methods were all named ``_lowercase`` (same problem) — they are restored to
    the attribute/method names the ``PipelineTool`` contract requires, and
    ``skip_special_tokens`` was bound to the wrong name instead of ``True``.
    """

    default_checkpoint = """openai/whisper-base"""
    description = (
        """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
        """transcribed text."""
    )
    name = """transcriber"""
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["""audio"""]
    outputs = ["""text"""]

    def encode(self, audio):
        """Turn raw audio into Whisper input features (PyTorch tensors)."""
        return self.pre_processor(audio, return_tensors="""pt""").input_features

    def forward(self, inputs):
        """Run Whisper autoregressive generation on the encoded features."""
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        """Decode the generated token ids back to a single transcription string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
76
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class UpperCAmelCase_:
    """Mixin of save/load round-trip tests shared by feature-extractor test classes.

    Consumers must define ``feature_extraction_class`` and ``feat_extract_dict``.

    Fixes relative to the corrupted original: the four methods were all named
    ``_lowercase`` (each shadowing the previous one, and none discoverable by
    the test runner) — restored to ``test_*`` names — and one assertion compared
    against an undefined name instead of the dict ``value``.
    """

    # NOTE(review): placeholder class attribute kept from the original mixin;
    # its intended name is not recoverable from this file — confirm upstream.
    a__ = None

    def test_feat_extract_to_json_string(self):
        """JSON string serialization must reproduce every constructor argument."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            # was: assertEqual(obj[key], UpperCamelCase__)  — undefined name
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        """Round-trip through ``to_json_file`` / ``from_json_file`` must preserve config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, """feat_extract.json""")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        """Round-trip through ``save_pretrained`` / ``from_pretrained`` must preserve config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        """The feature extractor must be constructible with all-default arguments."""
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
76
1
from math import pi, sqrt


def gamma(num):
    """Compute the Gamma function for positive integers and half-integers.

    Uses the recurrence Gamma(x) = (x - 1) * Gamma(x - 1) with the base cases
    Gamma(1) = 1 and Gamma(1/2) = sqrt(pi).

    Fixes relative to the corrupted original: the recursive call referenced an
    undefined name, the half-integer base case returned ``sqrt(num)`` instead
    of ``sqrt(pi)``, and the self-test function shadowed this one (both were
    named ``a__``).

    Raises:
        ValueError: if ``num`` is not positive.
        OverflowError: if the result would overflow a float (num > 171.5).
        NotImplementedError: if ``num`` is not an integer or half-integer.
    """
    if num <= 0:
        raise ValueError("""math domain error""")
    if num > 171.5:
        raise OverflowError("""math range error""")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""")
    elif num == 0.5:
        # Gamma(1/2) = sqrt(pi): the base case of the half-integer recursion.
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


# Keep the original (obfuscated) public name as an alias for existing callers.
a__ = gamma


def test_gamma():
    """Spot-check small arguments."""
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input('Gamma of: '))
        print(f'''gamma({num}) = {gamma(num)}''')
        print('\nEnter 0 to exit...')
76
from ..utils import DummyObject, requires_backends


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder object that raises a helpful error when `note_seq` is missing.

    Any attempt to instantiate it (or use the classmethod constructors) calls
    ``requires_backends``, which reports that the ``note_seq`` backend must be
    installed.

    Fixes relative to the corrupted original: the metaclass ``_A`` was an
    undefined name (``DummyObject`` is imported above for exactly this role),
    ``*args``/``**kwargs`` shared one parameter name (a SyntaxError), and the
    two classmethods were both named ``_lowercase`` so one shadowed the other —
    restored to the conventional ``from_config``/``from_pretrained`` pair.
    """

    # Backends this dummy stands in for.
    a__ = ["""note_seq"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""note_seq"""])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["""note_seq"""])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["""note_seq"""])
76
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __lowerCAmelCase : int = logging.get_logger(__name__) __lowerCAmelCase : Optional[Any] = {'vocab_file': 'sentencepiece.bpe.model'} __lowerCAmelCase : Tuple = { 'vocab_file': { 'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez-orangesum-title': ( 'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model' ), }, } __lowerCAmelCase : Tuple = { 'moussaKam/mbarthez': 1024, 'moussaKam/barthez': 1024, 'moussaKam/barthez-orangesum-title': 1024, } __lowerCAmelCase : Optional[Any] = '▁' class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = ["""input_ids""", """attention_mask"""] def __init__( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : List[str]="</s>" , UpperCamelCase__ : List[Any]="</s>" , UpperCamelCase__ : Union[str, Any]="<s>" , UpperCamelCase__ : Tuple="<unk>" , UpperCamelCase__ : Any="<pad>" , UpperCamelCase__ : Optional[int]="<mask>" , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : List[str] , ) -> None: """simple docstring""" __magic_name__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token __magic_name__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , 
mask_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , ) __magic_name__ = vocab_file __magic_name__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase__ ) ) __magic_name__ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} __magic_name__ = len(self.sp_model ) - 1 __magic_name__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __magic_name__ = [self.cls_token_id] __magic_name__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase__ )) + [1] return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1] def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __magic_name__ = [self.sep_token_id] __magic_name__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowercase ( self : Tuple ) -> str: """simple docstring""" return len(self.sp_model ) def _lowercase ( self : int ) -> Optional[Any]: """simple docstring""" __magic_name__ = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )} 
vocab.update(self.added_tokens_encoder ) return vocab def _lowercase ( self : List[str] , UpperCamelCase__ : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Dict ) -> Any: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __magic_name__ = self.sp_model.PieceToId(UpperCamelCase__ ) return spm_id if spm_id else self.unk_token_id def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : str ) -> Tuple: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(UpperCamelCase__ ) def _lowercase ( self : List[Any] , UpperCamelCase__ : Optional[int] ) -> Dict: """simple docstring""" __magic_name__ = [] __magic_name__ = """""" __magic_name__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCamelCase__ ) + token __magic_name__ = True __magic_name__ = [] else: current_sub_tokens.append(UpperCamelCase__ ) __magic_name__ = False out_string += self.sp_model.decode(UpperCamelCase__ ) return out_string.strip() def __getstate__( self : Any ) -> List[str]: """simple docstring""" __magic_name__ = self.__dict__.copy() __magic_name__ = None return state def __setstate__( self : Union[str, Any] , UpperCamelCase__ : int ) -> int: """simple docstring""" __magic_name__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __magic_name__ = {} __magic_name__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowercase ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCamelCase__ ): 
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __magic_name__ = os.path.join( UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase__ , """wb""" ) as fi: __magic_name__ = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase__ ) return (out_vocab_file,)
76
def a__ ( A_ ): '''simple docstring''' return " ".join( """""".join(word[::-1] ) if len(A_ ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words('Hey wollef sroirraw'))
76
1
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict=13 , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : int=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : List[str]=99 , UpperCamelCase__ : List[Any]=24 , UpperCamelCase__ : int=2 , UpperCamelCase__ : int=6 , UpperCamelCase__ : List[Any]=37 , UpperCamelCase__ : int="gelu" , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : int=16 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Optional[Any]=0.02 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : str=1000 , ) -> int: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_input_mask __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = 
max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range __magic_name__ = num_labels __magic_name__ = scope __magic_name__ = range_bbox def _lowercase ( self : int ) -> List[Any]: """simple docstring""" __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __magic_name__ = bbox[i, j, 3] __magic_name__ = bbox[i, j, 1] __magic_name__ = t if bbox[i, j, 2] < bbox[i, j, 0]: __magic_name__ = bbox[i, j, 2] __magic_name__ = bbox[i, j, 0] __magic_name__ = t __magic_name__ = None if self.use_input_mask: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ = None __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def _lowercase ( self : Optional[Any] ) -> Tuple: """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def _lowercase ( self : List[Any] , 
UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , ) -> int: """simple docstring""" __magic_name__ = LiltModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , bbox=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ , bbox=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ , bbox=UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _lowercase ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , ) -> Any: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = LiltForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model( UpperCamelCase__ , bbox=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Dict , ) -> Optional[int]: """simple docstring""" __magic_name__ = LiltForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model( UpperCamelCase__ , bbox=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , 
start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase ( self : str ) -> Union[str, Any]: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = config_and_inputs __magic_name__ = { """input_ids""": input_ids, """bbox""": bbox, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class UpperCAmelCase_ ( _A , _A , _A , unittest.TestCase ): '''simple docstring''' a__ = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) a__ = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, """zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) a__ = False a__ = False def _lowercase ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] ) -> Union[str, Any]: """simple docstring""" return True def _lowercase ( self : Any ) -> Optional[int]: """simple docstring""" __magic_name__ = LiltModelTester(self ) __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def _lowercase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self : Dict ) -> Tuple: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : Tuple ) -> Any: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __magic_name__ = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : Dict ) -> Dict: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) def _lowercase ( self : Dict ) -> Union[str, Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) @slow def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = LiltModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @require_torch @slow class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(UpperCamelCase__ ) __magic_name__ = torch.tensor([[1, 2]] , device=UpperCamelCase__ ) __magic_name__ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=UpperCamelCase__ ) # forward pass with torch.no_grad(): __magic_name__ = model(input_ids=UpperCamelCase__ , bbox=UpperCamelCase__ ) __magic_name__ = torch.Size([1, 2, 768] ) __magic_name__ = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=UpperCamelCase__ , ) self.assertTrue(outputs.last_hidden_state.shape , UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , UpperCamelCase__ , atol=1E-3 ) )
76
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class UpperCAmelCase_ ( _A , unittest.TestCase ):
    """Tokenization tests for the Funnel Transformer slow and fast tokenizers.

    NOTE(review): a mechanical rename has collapsed most locals into
    ``__magic_name__`` and the class attributes into ``a__``.  Several
    statements below read names (``vocab_tokens``, ``input_text``,
    ``tokenizer``, ``inputs``, ``sentence_len``) that are no longer bound
    under those names — flagged inline; confirm against the upstream test
    file before relying on this module.
    """

    # NOTE(review): these presumably were tokenizer_class / rust_tokenizer_class /
    # test_rust_tokenizer / space_between_special_tokens before the rename —
    # as written they all rebind the single attribute `a__`.  TODO confirm.
    a__ = FunnelTokenizer
    a__ = FunnelTokenizerFast
    a__ = True
    a__ = True

    def _lowercase ( self : List[Any] ) -> str:
        """Write a tiny WordPiece vocabulary file into the mixin's temp dir."""
        super().setUp()

        # Minimal vocabulary: special tokens first, then word pieces.
        __magic_name__ = [
            """<unk>""",
            """<cls>""",
            """<sep>""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            # NOTE(review): `vocab_tokens` is not bound above (the token list was
            # assigned to `__magic_name__`) — presumably the original bound it here.
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )

    def _lowercase ( self : Dict , **UpperCamelCase__ : Tuple ) -> Union[str, Any]:
        """Return a slow FunnelTokenizer loaded from the temp vocab."""
        return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )

    def _lowercase ( self : str , **UpperCamelCase__ : str ) -> List[str]:
        """Return a fast FunnelTokenizerFast loaded from the temp vocab."""
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )

    def _lowercase ( self : List[str] , UpperCamelCase__ : str ) -> List[Any]:
        """Return an (input_text, expected_output_text) pair for round-trip tests."""
        __magic_name__ = """UNwant\u00E9d,running"""
        __magic_name__ = """unwanted, running"""
        # NOTE(review): `input_text` / `output_text` are not bound above —
        # presumably the two literals were assigned to them originally.
        return input_text, output_text

    def _lowercase ( self : Union[str, Any] ) -> Tuple:
        """Tokenize a mixed-case accented string; check word pieces and ids."""
        # NOTE(review): `tokenizer` and `UpperCamelCase__` are read below but not
        # bound in this no-argument method — the two locals were presumably
        # `tokenizer` and `tokens` before the rename.
        __magic_name__ = self.tokenizer_class(self.vocab_file )

        __magic_name__ = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(UpperCamelCase__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )

    def _lowercase ( self : str ) -> List[Any]:
        """Funnel marks the leading <cls> token with token_type_id 2; check the
        token-type layout for single-sentence and sentence-pair encodings."""
        # NOTE(review): `tokenizers`, `inputs` and `sentence_len` are read below
        # but only `__magic_name__` is ever assigned — presumably the originals.
        __magic_name__ = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
        for tokenizer in tokenizers:
            __magic_name__ = tokenizer("""UNwant\u00E9d,running""" )
            __magic_name__ = len(inputs["""input_ids"""] ) - 1
            self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len )

            __magic_name__ = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" )
            self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
76
1
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase : Any = get_tests_dir('fixtures/test_sentencepiece_no_bos.model') @require_sentencepiece @require_tokenizers class UpperCAmelCase_ ( _A , unittest.TestCase ): '''simple docstring''' a__ = PegasusTokenizer a__ = PegasusTokenizerFast a__ = True a__ = True def _lowercase ( self : List[str] ) -> List[str]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __magic_name__ = PegasusTokenizer(UpperCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _lowercase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" return PegasusTokenizer.from_pretrained("""google/pegasus-large""" ) def _lowercase ( self : Optional[Any] , **UpperCamelCase__ : int ) -> PegasusTokenizer: """simple docstring""" return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : Dict , UpperCamelCase__ : int ) -> Union[str, Any]: """simple docstring""" return ("This is a test", "This is a test") def _lowercase ( self : int ) -> Union[str, Any]: """simple docstring""" __magic_name__ = """</s>""" __magic_name__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def _lowercase ( self : int ) -> List[str]: """simple docstring""" __magic_name__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """</s>""" ) self.assertEqual(vocab_keys[-1] , """v""" ) self.assertEqual(len(UpperCamelCase__ ) , 1103 ) def _lowercase ( self : Tuple ) -> int: """simple 
docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def _lowercase ( self : Any ) -> Any: """simple docstring""" __magic_name__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __magic_name__ = self.tokenizer_class.from_pretrained(self.tmpdirname ) __magic_name__ = ( """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important""" """ </s> <pad> <pad> <pad>""" ) __magic_name__ = rust_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0] __magic_name__ = py_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0] self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : Any ) -> str: """simple docstring""" __magic_name__ = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word __magic_name__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions.""" __magic_name__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] __magic_name__ = tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ ).input_ids[0] self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : Any ) -> Any: """simple docstring""" __magic_name__ = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 __magic_name__ = """To ensure a smooth flow of bank resolutions.""" __magic_name__ = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] __magic_name__ = tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ ).input_ids[0] self.assertListEqual(UpperCamelCase__ , 
UpperCamelCase__ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _lowercase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __magic_name__ = ["""This is going to be way too long.""" * 150, """short example"""] __magic_name__ = ["""not super long but more than 5 tokens""", """tiny"""] __magic_name__ = self._large_tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="""pt""" ) __magic_name__ = self._large_tokenizer( text_target=UpperCamelCase__ , max_length=5 , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(UpperCamelCase__ ) == 2 # input_ids, attention_mask. @slow def _lowercase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , ) @require_sentencepiece @require_tokenizers class UpperCAmelCase_ ( _A , unittest.TestCase ): '''simple docstring''' a__ = PegasusTokenizer a__ = PegasusTokenizerFast a__ = True a__ = True def _lowercase ( self : int ) -> int: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __magic_name__ = PegasusTokenizer(UpperCamelCase__ , offset=0 , mask_token_sent=UpperCamelCase__ , mask_token="""[MASK]""" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _lowercase ( self : int ) -> str: """simple docstring""" return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" ) def _lowercase ( self : str , **UpperCamelCase__ : int ) -> PegasusTokenizer: """simple docstring""" return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , 
UpperCamelCase__ : int ) -> Optional[int]: """simple docstring""" return ("This is a test", "This is a test") def _lowercase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __magic_name__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __magic_name__ = self.tokenizer_class.from_pretrained(self.tmpdirname ) __magic_name__ = ( """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>""" """ <pad> <pad> <pad>""" ) __magic_name__ = rust_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0] __magic_name__ = py_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0] self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) @require_torch def _lowercase ( self : int ) -> Tuple: """simple docstring""" __magic_name__ = ["""This is going to be way too long.""" * 1000, """short example"""] __magic_name__ = ["""not super long but more than 5 tokens""", """tiny"""] __magic_name__ = self._large_tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="""pt""" ) __magic_name__ = self._large_tokenizer( text_target=UpperCamelCase__ , max_length=5 , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(UpperCamelCase__ ) == 2 # input_ids, attention_mask. def _lowercase ( self : Optional[int] ) -> int: """simple docstring""" __magic_name__ = ( """This is an example string that is used to test the original TF implementation against the HF""" """ implementation""" ) __magic_name__ = self._large_tokenizer(UpperCamelCase__ ).input_ids self.assertListEqual( UpperCamelCase__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
76
from collections import deque from .hash_table import HashTable class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : int , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[Any] ) -> Optional[Any]: """simple docstring""" super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Dict: """simple docstring""" __magic_name__ = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(UpperCamelCase__ ) __magic_name__ = self.values[key] def _lowercase ( self : List[str] ) -> int: """simple docstring""" return ( sum(self.charge_factor - len(UpperCamelCase__ ) for slot in self.values ) / self.size_table * self.charge_factor ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ) -> str: """simple docstring""" if not ( len(self.values[key] ) == self.charge_factor and self.values.count(UpperCamelCase__ ) == 0 ): return key return super()._collision_resolution(UpperCamelCase__ , UpperCamelCase__ )
76
1
import unittest

import numpy as np

from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.albert.modeling_flax_albert import (
        FlaxAlbertForMaskedLM,
        FlaxAlbertForMultipleChoice,
        FlaxAlbertForPreTraining,
        FlaxAlbertForQuestionAnswering,
        FlaxAlbertForSequenceClassification,
        FlaxAlbertForTokenClassification,
        FlaxAlbertModel,
    )


class FlaxAlbertModelTester(unittest.TestCase):
    """Builds a tiny Albert config and random inputs for the Flax model tests.

    The previous revision bound every constructor argument to a throwaway
    local (``__magic_name__``), so no instance attribute was ever set and
    every method reading ``self.batch_size`` etc. failed; it also renamed the
    class and methods, breaking the ``FlaxAlbertModelTester(self)`` reference
    in ``setUp`` and the hooks ``FlaxModelTesterMixin`` calls.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) built
        from this tester's dimensions."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # NOTE(review): the obfuscated source lost this argument's value;
            # False matches the upstream Flax Albert tester — confirm.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape FlaxModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model tests for the Albert family.

    Distinct class names (instead of three classes all named the same)
    so unittest discovery actually runs every class; ``setUp``/``test_*``
    names restored so the framework invokes them.
    """

    # Duplicate FlaxAlbertForQuestionAnswering entry removed — it made the
    # mixin run the identical test suite twice for that head.
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test that every head loads from the hub and runs a forward pass."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        """Compare a slice of albert-base-v2's last hidden state to reference values."""
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        # NOTE(review): leading 0 in the mask is carried over verbatim from the
        # original fixture (it masks the first token) — confirm intentional.
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
76
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_, AssumeRolePolicyDocument=json.dumps(A_, indent=2 ) ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", 
"""logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(A_, indent=2 ), ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'''role {role_name} already exists. Using existing one''' ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def a__ ( ): '''simple docstring''' __magic_name__ = _ask_options( """How do you want to authorize?""", ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """], A_, ) __magic_name__ = None if credentials_configuration == 0: __magic_name__ = _ask_field("""Enter your AWS Profile name: [default] """, default="""default""" ) __magic_name__ = aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __magic_name__ = _ask_field("""AWS Access Key ID: """ ) __magic_name__ = aws_access_key_id __magic_name__ = _ask_field("""AWS Secret Access Key: """ ) __magic_name__ = aws_secret_access_key __magic_name__ = _ask_field("""Enter your AWS Region: [us-east-1]""", default="""us-east-1""" ) __magic_name__ = aws_region __magic_name__ = _ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""", ["""Provide IAM Role name""", """Create new IAM role using credentials"""], A_, ) if role_management == 0: __magic_name__ = _ask_field("""Enter your IAM role name: """ ) else: __magic_name__ = """accelerate_sagemaker_execution_role""" print(f'''Accelerate will create an iam role "{iam_role_name}" 
using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __magic_name__ = _ask_field( """Do you want to use custom Docker image? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_custom_docker_image: __magic_name__ = _ask_field("""Enter your Docker image: """, lambda A_ : str(A_ ).lower() ) __magic_name__ = _ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_inputs_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_metrics_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_options( """What is the distributed mode?""", ["""No distributed training""", """Data parallelism"""], _convert_sagemaker_distributed_mode, ) __magic_name__ = {} __magic_name__ = _ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""", _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_dynamo: __magic_name__ = """dynamo_""" __magic_name__ = _ask_options( """Which dynamo backend would you like to use?""", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) __magic_name__ = _ask_field( """Do you want to customize the defaults sent to torch.compile? 
[yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_custom_options: __magic_name__ = _ask_options( """Which mode do you want to use?""", A_, lambda A_ : TORCH_DYNAMO_MODES[int(A_ )], default="""default""", ) __magic_name__ = _ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = _ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = """Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __magic_name__ = _ask_options( A_, A_, lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __magic_name__ = _ask_field(A_, lambda A_ : str(A_ ).lower(), default="""ml.p3.2xlarge""" ) __magic_name__ = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __magic_name__ = _ask_field( """How many machines do you want use? [1]: """, A_, default=1, ) __magic_name__ = _ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""", ["""no""", """fp16""", """bf16""", """fp8"""], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=A_, use_cpu=A_, dynamo_config=A_, eca_instance_type=A_, profile=A_, region=A_, iam_role_name=A_, mixed_precision=A_, num_machines=A_, sagemaker_inputs_file=A_, sagemaker_metrics_file=A_, )
76
1
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class UpperCAmelCase_ ( _A , _A , _A , unittest.TestCase ): '''simple docstring''' a__ = StableUnCLIPPipeline a__ = TEXT_TO_IMAGE_PARAMS a__ = TEXT_TO_IMAGE_BATCH_PARAMS a__ = TEXT_TO_IMAGE_IMAGE_PARAMS a__ = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false a__ = False def _lowercase ( self : Optional[int] ) -> List[Any]: """simple docstring""" __magic_name__ = 32 __magic_name__ = embedder_hidden_size # prior components torch.manual_seed(0 ) __magic_name__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) __magic_name__ = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase__ , projection_dim=UpperCamelCase__ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) __magic_name__ = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=UpperCamelCase__ , num_layers=1 , ) torch.manual_seed(0 ) __magic_name__ = DDPMScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , 
clip_sample=UpperCamelCase__ , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , ) # regular denoising components torch.manual_seed(0 ) __magic_name__ = StableUnCLIPImageNormalizer(embedding_dim=UpperCamelCase__ ) __magic_name__ = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) __magic_name__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) __magic_name__ = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) __magic_name__ = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCamelCase__ , layers_per_block=1 , upcast_attention=UpperCamelCase__ , use_linear_projection=UpperCamelCase__ , ) torch.manual_seed(0 ) __magic_name__ = DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.00085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=UpperCamelCase__ , steps_offset=1 , ) torch.manual_seed(0 ) __magic_name__ = AutoencoderKL() __magic_name__ = { # prior components """prior_tokenizer""": prior_tokenizer, """prior_text_encoder""": prior_text_encoder, """prior""": prior, """prior_scheduler""": prior_scheduler, # image noising components """image_normalizer""": image_normalizer, """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder, """unet""": unet, """scheduler""": scheduler, """vae""": vae, } return components 
def _lowercase ( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Any=0 ) -> Tuple: """simple docstring""" if str(UpperCamelCase__ ).startswith("""mps""" ): __magic_name__ = torch.manual_seed(UpperCamelCase__ ) else: __magic_name__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) __magic_name__ = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """prior_num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def _lowercase ( self : Optional[Any] ) -> Any: """simple docstring""" __magic_name__ = torch_device == """cpu""" self._test_attention_slicing_forward_pass(test_max_difference=UpperCamelCase__ ) def _lowercase ( self : int ) -> int: """simple docstring""" __magic_name__ = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=UpperCamelCase__ ) @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self : int ) -> int: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __magic_name__ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" ) __magic_name__ = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __magic_name__ = torch.Generator(device="""cpu""" ).manual_seed(0 ) __magic_name__ = pipe("""anime turle""" , generator=UpperCamelCase__ , output_type="""np""" ) __magic_name__ = output.images[0] assert image.shape == (768, 
768, 3) assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __magic_name__ = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) __magic_name__ = pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __magic_name__ = pipe( """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , ) __magic_name__ = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
76
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __lowerCAmelCase : Dict = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : Optional[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = True , **UpperCamelCase__ : int , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ , param_name="""crop_size""" ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else 
OPENAI_CLIP_MEAN __magic_name__ = image_std if image_std is not None else OPENAI_CLIP_STD __magic_name__ = do_convert_rgb def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ) -> Optional[int]: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ) -> PIL.Image.Image: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample 
__magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __magic_name__ = [convert_to_rgb(UpperCamelCase__ ) for image in images] # All transformations expect numpy arrays. 
__magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
1
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( """files""", [ ["""full:README.md""", """dataset_infos.json"""], ["""empty:README.md""", """dataset_infos.json"""], ["""dataset_infos.json"""], ["""full:README.md"""], ], ) def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path_factory.mktemp("""dset_infos_dir""" ) if "full:README.md" in files: with open(dataset_infos_dir / """README.md""", """w""" ) as f: f.write("""---\ndataset_info:\n dataset_size: 42\n---""" ) if "empty:README.md" in files: with open(dataset_infos_dir / """README.md""", """w""" ) as f: f.write("""""" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / """dataset_infos.json""", """w""" ) as f: f.write("""{\"default\": {\"dataset_size\": 42}}""" ) __magic_name__ = DatasetInfosDict.from_directory(A_ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( """dataset_info""", [ DatasetInfo(), DatasetInfo( description="""foo""", features=Features({"""a""": Value("""int32""" )} ), builder_name="""builder""", config_name="""config""", version="""1.0.0""", splits=[{"""name""": """train"""}], download_size=42, ), ], ) def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = str(A_ ) dataset_info.write_to_directory(A_ ) __magic_name__ = DatasetInfo.from_directory(A_ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(A_, """dataset_info.json""" ) ) def a__ ( ): '''simple docstring''' __magic_name__ = DatasetInfo( description="""foo""", citation="""bar""", homepage="""https://foo.bar""", license="""CC0""", features=Features({"""a""": Value("""int32""" )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name="""builder""", config_name="""config""", version="""1.0.0""", splits=[{"""name""": """train""", 
"""num_examples""": 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234, ) __magic_name__ = dataset_info._to_yaml_dict() assert sorted(A_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) ) __magic_name__ = yaml.safe_dump(A_ ) __magic_name__ = yaml.safe_load(A_ ) assert dataset_info_yaml_dict == reloaded def a__ ( ): '''simple docstring''' __magic_name__ = DatasetInfo() __magic_name__ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( """dataset_infos_dict""", [ DatasetInfosDict(), DatasetInfosDict({"""default""": DatasetInfo()} ), DatasetInfosDict({"""my_config_name""": DatasetInfo()} ), DatasetInfosDict( { """default""": DatasetInfo( description="""foo""", features=Features({"""a""": Value("""int32""" )} ), builder_name="""builder""", config_name="""config""", version="""1.0.0""", splits=[{"""name""": """train"""}], download_size=42, ) } ), DatasetInfosDict( { """v1""": DatasetInfo(dataset_size=42 ), """v2""": DatasetInfo(dataset_size=1337 ), } ), ], ) def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = str(A_ ) dataset_infos_dict.write_to_directory(A_ ) __magic_name__ = DatasetInfosDict.from_directory(A_ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): __magic_name__ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml __magic_name__ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(A_, """README.md""" ) )
76
import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[int]=99 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : str=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : List[Any]=None , ) -> Union[str, Any]: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_input_mask __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = 
attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range __magic_name__ = num_labels __magic_name__ = num_choices __magic_name__ = scope def _lowercase ( self : Any ) -> Any: """simple docstring""" __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = None if self.use_input_mask: __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ = None __magic_name__ = None __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase ( self : Tuple ) -> Any: """simple docstring""" return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) def _lowercase ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> Tuple: """simple docstring""" __magic_name__ = 
NystromformerModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ) -> str: """simple docstring""" __magic_name__ = NystromformerForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Any ) -> Optional[Any]: """simple docstring""" __magic_name__ = NystromformerForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , 
UpperCamelCase__ : Dict , UpperCamelCase__ : Any ) -> Optional[int]: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = NystromformerForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Dict: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = NystromformerForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.num_choices __magic_name__ = NystromformerForMultipleChoice(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowercase ( self : int ) -> List[Any]: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = config_and_inputs __magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _A , _A , unittest.TestCase ): '''simple docstring''' a__ = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) a__ = ( { """feature-extraction""": NystromformerModel, """fill-mask""": NystromformerForMaskedLM, """question-answering""": NystromformerForQuestionAnswering, """text-classification""": NystromformerForSequenceClassification, """token-classification""": NystromformerForTokenClassification, """zero-shot""": NystromformerForSequenceClassification, } if is_torch_available() else {} ) a__ = False a__ = False def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = NystromformerModelTester(self ) __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def _lowercase ( self : Tuple ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self : Optional[Any] ) -> Any: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : Optional[Any] ) -> int: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __magic_name__ = type 
self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : List[Any] ) -> List[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def _lowercase ( self : Dict ) -> List[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def _lowercase ( self : str ) -> int: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def _lowercase ( self : List[Any] ) -> List[str]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def _lowercase ( self : str ) -> Tuple: """simple docstring""" for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = NystromformerModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @require_torch class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __magic_name__ = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): __magic_name__ = model(UpperCamelCase__ )[0] __magic_name__ = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , UpperCamelCase__ ) __magic_name__ = torch.tensor( [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , 
UpperCamelCase__ , atol=1E-4 ) ) @slow def _lowercase ( self : int ) -> str: """simple docstring""" __magic_name__ = """the [MASK] of Belgium is Brussels""" __magic_name__ = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" ) __magic_name__ = tokenizer(UpperCamelCase__ , return_tensors="""pt""" ) with torch.no_grad(): __magic_name__ = model(encoding.input_ids ).logits __magic_name__ = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(UpperCamelCase__ ) , """capital""" )
76
1
import torch from transformers import AutoModel class UpperCAmelCase_ ( torch.nn.Module ): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : Any="sayef/fsner-bert-base-uncased" ) -> Optional[int]: """simple docstring""" super(UpperCamelCase__ , self ).__init__() __magic_name__ = AutoModel.from_pretrained(UpperCamelCase__ , return_dict=UpperCamelCase__ ) __magic_name__ = torch.nn.CosineSimilarity(3 , 1E-08 ) __magic_name__ = torch.nn.Softmax(dim=1 ) def _lowercase ( self : int , **UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return self.bert(**UpperCamelCase__ ).last_hidden_state def _lowercase ( self : Any , UpperCamelCase__ : Union[str, Any] ) -> str: """simple docstring""" return token_embeddings.sum(2 , keepdim=UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int=1 ) -> List[Any]: """simple docstring""" return self.softmax(T * self.cos(UpperCamelCase__ , UpperCamelCase__ ) ) def _lowercase ( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ) -> str: """simple docstring""" __magic_name__ = W_supports["""sizes"""].tolist() __magic_name__ = W_supports["""start_token_id"""].item() __magic_name__ = W_supports["""end_token_id"""].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] __magic_name__ = self.BERT(**UpperCamelCase__ ) __magic_name__ = self.BERT(**UpperCamelCase__ ) __magic_name__ = None __magic_name__ = None __magic_name__ = W_supports["""input_ids"""] == start_token_id __magic_name__ = W_supports["""input_ids"""] == end_token_id for i, size in enumerate(UpperCamelCase__ ): if i == 0: __magic_name__ = 0 else: __magic_name__ = support_sizes[i - 1] __magic_name__ = S[s : s + size][start_token_masks[s : s + size]] __magic_name__ = S[s : s + size][end_token_masks[s : s + size]] __magic_name__ = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) __magic_name__ = 
torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: __magic_name__ = torch.vstack((p_starts, p_start) ) __magic_name__ = torch.vstack((p_ends, p_end) ) else: __magic_name__ = p_start __magic_name__ = p_end return p_starts, p_ends
76
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : Union[str, Any] = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """cvt""" def __init__( self : Dict , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : List[Any]=[7, 3, 3] , UpperCamelCase__ : Any=[4, 2, 2] , UpperCamelCase__ : Optional[Any]=[2, 1, 1] , UpperCamelCase__ : Union[str, Any]=[64, 192, 384] , UpperCamelCase__ : Dict=[1, 3, 6] , UpperCamelCase__ : Any=[1, 2, 10] , UpperCamelCase__ : List[str]=[4.0, 4.0, 4.0] , UpperCamelCase__ : Dict=[0.0, 0.0, 0.0] , UpperCamelCase__ : Tuple=[0.0, 0.0, 0.0] , UpperCamelCase__ : Optional[Any]=[0.0, 0.0, 0.1] , UpperCamelCase__ : str=[True, True, True] , UpperCamelCase__ : Optional[Any]=[False, False, True] , UpperCamelCase__ : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase__ : List[Any]=[3, 3, 3] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : Optional[int]=[2, 2, 2] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : List[str]=[1, 1, 1] , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=1E-12 , **UpperCamelCase__ : int , ) -> Dict: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = num_channels __magic_name__ = patch_sizes __magic_name__ = patch_stride __magic_name__ = patch_padding __magic_name__ = embed_dim __magic_name__ = num_heads __magic_name__ = depth __magic_name__ = mlp_ratio __magic_name__ = attention_drop_rate __magic_name__ = drop_rate __magic_name__ = drop_path_rate __magic_name__ = qkv_bias __magic_name__ = cls_token __magic_name__ = qkv_projection_method __magic_name__ = kernel_qkv __magic_name__ = padding_kv __magic_name__ = stride_kv __magic_name__ = padding_q __magic_name__ = stride_q __magic_name__ = initializer_range 
__magic_name__ = layer_norm_eps
76
1
__lowerCAmelCase : Any = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []} __lowerCAmelCase : str = ['a', 'b', 'c', 'd', 'e'] def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = start # add current to visited visited.append(A_ ) __magic_name__ = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: __magic_name__ = topological_sort(A_, A_, A_ ) # if all neighbors visited add current to sort sort.append(A_ ) # if all vertices haven't been visited select a new one to visit if len(A_ ) != len(A_ ): for vertice in vertices: if vertice not in visited: __magic_name__ = topological_sort(A_, A_, A_ ) # return sort return sort if __name__ == "__main__": __lowerCAmelCase : List[Any] = topological_sort('a', [], []) print(sort)
76
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure for the CANINE model package.
# Fix: the original bound this dict to a throwaway name, then clobbered it
# with the modeling list, and finally referenced the undefined name
# `_import_structure` — a guaranteed NameError at import time.
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply skip the modeling symbols
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )
else:
    import sys

    # Replace this module with a lazy proxy so symbols import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
76
1
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
    "Salesforce/blip-vqa-capfit-large": (
        "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-base": (
        "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-large": (
        "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
    ),
    "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
    "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
    "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
    "Salesforce/blip-itm-large-flikr": (
        "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
    ),
}


class BlipTextConfig(PretrainedConfig):
    """Configuration of the BLIP text encoder/decoder.

    Fix: the original gave all three classes the same obfuscated name while
    the composite config below instantiates ``BlipTextConfig`` /
    ``BlipVisionConfig`` by name, which could never resolve; the canonical
    class names are restored here.
    """

    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        # Special-token ids are consumed by PretrainedConfig itself.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        # cross-attention width; overwritten by BlipConfig from the vision tower
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this sub-config, unwrapping it from a composite BlipConfig."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class BlipVisionConfig(PretrainedConfig):
    """Configuration of the BLIP ViT-style vision encoder."""

    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this sub-config, unwrapping it from a composite BlipConfig."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class BlipConfig(PretrainedConfig):
    """Composite BLIP configuration holding a text and a vision sub-config."""

    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")
        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)
        # the text tower cross-attends over vision states of this width
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        """Alternate constructor from already-built sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, recursing into the two sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
76
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    """Build a sequence-classification model and copy s3prl downstream weights.

    NOTE(review): the original obfuscation dropped every assignment target;
    the targets below follow the standard s3prl->HF layout — confirm against
    the upstream conversion script.
    """
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Build an audio-frame-classification (diarization) model from s3prl weights."""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Build an XVector (speaker verification) model from s3prl weights."""
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an s3prl checkpoint to a HF model + feature extractor on disk.

    Dispatches on ``config.architectures[0]`` to the converter above and
    saves both the model and the feature extractor to ``model_dump_path``.
    Raises NotImplementedError for unsupported architectures.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
76
1
from ..utils import DummyObject, requires_backends


# Dummy placeholder objects used when torch/transformers/onnx are missing:
# every construction or from_config/from_pretrained call raises a helpful
# ImportError via requires_backends.
# Fixes: the original used `*X, **X` (duplicate argument name = SyntaxError),
# an undefined `_A` metaclass instead of the imported DummyObject, and gave
# both classmethods the same name so one silently shadowed the other.
# NOTE(review): the six original class names were destroyed by obfuscation
# (all `UpperCAmelCase_`); restore the real pipeline names from upstream.


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
76
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    """Assert the dataset has the 4-row single-'text'-column shape with the expected dtypes."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    # memory must grow only when keep_in_memory is requested
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    # Fix: the original `assert x == split if split else "train"` was parsed as
    # a conditional expression whose else-branch ("train") is always truthy,
    # making the assertion vacuous for split=None.
    expected_split = str(split) if split else "train"
    assert dataset.split == expected_split


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Assert each requested split of the DatasetDict has the expected shape."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
76
1
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)  # the only values the sorter accepts


def dutch_national_flag_sort(sequence):
    """Sort a sequence containing only the values 0, 1 and 2 in one pass.

    Classic Dutch-national-flag three-way partition: ``low`` walks the
    boundary of the 0-region, ``high`` the boundary of the 2-region, and
    ``mid`` scans the unknown middle.  The input list is sorted in place
    and also returned.  Raises ValueError on any value outside ``colors``.

    Fix: the original defined this function under an obfuscated name while
    the __main__ block (and error message) referenced
    ``dutch_national_flag_sort`` / ``colors``, which were undefined.
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # 0 goes to the front; both boundaries advance
            sequence[mid], sequence[low] = sequence[low], sequence[mid]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            # 1 is already in the middle region
            mid += 1
        elif sequence[mid] == colors[2]:
            # 2 goes to the back; mid stays to re-examine the swapped-in value
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
76
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class UpperCAmelCase_(BaseImageProcessor):
    """Image processor: shortest-edge resize (default 256), center crop
    (default 224x224), 1/255 rescale and ImageNet-standard normalization.

    Fixes: the original declared every method with duplicated obfuscated
    parameter names (a SyntaxError) and lost the ``BaseImageProcessor`` base
    and canonical method names, which the processing pipeline calls.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        # shortest-edge sizing is not square; keep the aspect ratio
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured transforms; per-call arguments override the defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
76
1
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    """Warn about (and optionally pop) deprecated arguments/attributes.

    Each positional argument is an ``(attribute, version_name, message)``
    tuple (a single tuple may be passed unwrapped).  If ``take_from`` is a
    kwargs dict, the deprecated key is popped and its value returned; if it
    is an object, the attribute value is returned; otherwise only a
    FutureWarning is emitted.  Raises ValueError when the library version has
    already reached ``version_name`` (the shim should have been deleted) and
    TypeError for leftover unexpected kwargs.

    Fix: the original signature declared four parameters under one duplicated
    obfuscated name — a SyntaxError — and dropped the FutureWarning category
    and the caller-filename interpolation.
    """
    # imported lazily to avoid a circular import with the package root
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    # anything still left in the kwargs dict was simply an unknown argument
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
76
import math


def insertion_sort(array, start=0, end=0):
    """Sort ``array[start:end]`` in place with insertion sort; return ``array``.

    ``end == 0`` means "to the end of the list".
    """
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements right until the slot for the value is found.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):  # Max Heap
    """Sift ``array[index]`` down so the subtree rooted there is a max-heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[largest], array[index] = array[index], array[largest]
        heapify(array, largest, heap_size)


def heap_sort(array):
    """Sort ``array`` in place with heapsort; return ``array``."""
    n = len(array)

    # Build the max-heap bottom-up.
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    # Repeatedly move the max to the end and restore the heap.
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)

    return array


def median_of_a(array, first_index, middle_index, last_index):
    """Return the median of the three sampled values (pivot selection)."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    """Hoare-style partition of ``array[low:high]`` around ``pivot``.

    Returns the split index ``i`` such that elements before it are <= pivot
    and elements from it on are >= pivot.
    """
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    """Sort ``array`` with introsort (quicksort/heapsort/insertion hybrid)."""
    if len(array) == 0:
        return array
    # Depth limit 2*ceil(log2(n)) before falling back to heapsort
    # (fixes the dump's nonexistent ``math.loga``).
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    """Recursive introsort worker over ``array[start:end]``."""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_a(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
    print(sort(unsorted))
76
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): the dump bound the logger and this map to the same name,
# clobbering the logger; they must be distinct module-level bindings.
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration class for CANINE models (``model_type="canine"``).

    Defaults correspond to the ``google/canine-s`` checkpoint. Parameter names
    were reconstructed from the attribute assignments in the body; the dump's
    signature reused one duplicated name for every argument (a SyntaxError).
    """

    model_type = "canine"

    def __init__(
        self,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 1_6384,
        type_vocab_size: int = 16,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        pad_token_id: int = 0,
        bos_token_id: int = 0xE000,
        eos_token_id: int = 0xE001,
        downsampling_rate: int = 4,
        upsampling_kernel_size: int = 4,
        num_hash_functions: int = 8,
        num_hash_buckets: int = 1_6384,
        local_transformer_stride: int = 128,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
76
import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetVaConfig,
    MobileNetVaForImageClassification,
    MobileNetVaImageProcessor,
    load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_va_config(model_name):
    """Build a MobileNetV1 config for ``model_name`` (e.g. ``mobilenet_v1_1.0_224``).

    Parses the depth multiplier and image size out of the checkpoint name and
    attaches the 1001-class ImageNet label mapping.
    """
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        # assumes these map to depth_multiplier / image_size — matches upstream
        # MobileNetV1Config; confirm against the installed transformers version.
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # Shift every ImageNet label up by one to make room for "background" at 0.
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    """Download the standard COCO test image (two cats) for sanity checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a TensorFlow MobileNetV1 checkpoint to the 🤗 PyTorch format.

    Verifies the converted logits against known reference values for the two
    standard checkpoints, then saves (and optionally pushes) model + processor.
    """
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='mobilenet_v1_1.0_224',
        type=str,
        help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
    )
    parser.add_argument(
        '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
76
1
from ..utils import DummyObject, requires_backends


class UpperCAmelCase_(metaclass=DummyObject):
    """Dummy placeholder that raises a helpful error when ``note_seq`` is missing.

    Every access goes through ``requires_backends``, which raises an
    ImportError explaining that the ``note_seq`` backend must be installed.
    The dump's undefined base ``_A`` is restored to the imported ``DummyObject``.
    """

    # NOTE(review): DummyObject reads this attribute as `_backends` upstream;
    # the dump's `a__` name would never be consulted by the metaclass.
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    # The dump defined both classmethods under one name, making the first
    # unreachable; restored to the standard dummy-object constructor names.
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
76
import collections
import importlib.util
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the normalized backend name of an `if not is_xxx_available()` line.

    Multiple backends on one line are sorted and joined with "_and_";
    returns None when the line is not a backend guard.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Parse a transformers ``__init__.py``.

    Returns a pair of dicts mapping backend name ("none" for the base) to the
    list of objects declared in `_import_structure` and in the TYPE_CHECKING
    section respectively, or None for a traditional (non-lazy) init.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}

    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}

    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of error strings."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Walk the source tree and raise if any lazy init is inconsistent."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the list of direct submodules of ``transformers``."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    """Raise if a submodule is missing from the main init's _import_structure."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
76
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: keys are submodule names, values the public objects
# they export. The dump bound this (and every backend list below) to throwaway
# names, so _LazyModule never saw them — restored here.
_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; the dump discarded the proxy
    # into an unused variable instead of installing it in sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): the dump bound the logger and this map to the same name,
# clobbering the logger; they must be distinct module-level bindings.
SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration class for SEW-D models (``model_type="sew-d"``).

    Defaults correspond to the ``asapp/sew-d-tiny-100k`` checkpoint. Parameter
    names were reconstructed from the attribute assignments in the body; the
    dump's signature reused one duplicated name for every argument (a
    SyntaxError).
    """

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv tuples describe the same stack of layers and must agree.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect."""
                """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
                f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
                f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.'''
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def _lowercase(self) -> int:
        """Total stride of the conv feature encoder (product of all conv strides).

        Upstream this property is named ``inputs_to_logits_ratio``; the obfuscated
        name is kept so external callers are unaffected. Return annotation fixed
        from ``str`` to ``int``.
        """
        return functools.reduce(operator.mul, self.conv_stride, 1)
76
1
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: keys are submodule names, values the public objects
# they export. The dump bound this (and the torch-only list below) to throwaway
# names, so _LazyModule never saw them — restored here.
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; the dump discarded the proxy
    # into an unused variable instead of installing it in sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
import math
import random


def sigmoid_function(value, deriv=False):
    """Logistic sigmoid, or its derivative when ``deriv`` is True.

    With ``deriv=True``, ``value`` is assumed to already be a sigmoid output,
    so the derivative is ``value * (1 - value)``.
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected, number_propagations):
    """Train a single-weight, single-neuron net toward ``expected`` (percent).

    Runs ``number_propagations`` rounds of forward pass + delta-rule update
    and returns the final output scaled back to [0, 100]. The starting weight
    is random, so results vary run to run unless ``random`` is seeded.
    Note: ``number_propagations`` must be >= 1 (the return value is computed
    inside the loop) — preexisting behavior.
    """
    # Random weight in an odd-integer range derived from randint(1, 100).
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
76
1
from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class UpperCAmelCase_ : '''simple docstring''' def __init__( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]=13 , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : Any=True , UpperCamelCase__ : int=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Dict=99 , UpperCamelCase__ : Union[str, Any]=[1, 1, 2] , UpperCamelCase__ : List[Any]=1 , UpperCamelCase__ : int=32 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Optional[int]=8 , UpperCamelCase__ : List[Any]=37 , UpperCamelCase__ : str="gelu_new" , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : str=512 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : Any=4 , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Dict=False , ) -> Optional[int]: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_input_mask __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = block_sizes __magic_name__ = num_decoder_layers __magic_name__ = d_model __magic_name__ = n_head __magic_name__ = d_head __magic_name__ = d_inner 
__magic_name__ = hidden_act __magic_name__ = hidden_dropout __magic_name__ = attention_dropout __magic_name__ = activation_dropout __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = 2 __magic_name__ = num_labels __magic_name__ = num_choices __magic_name__ = scope __magic_name__ = initializer_std # Used in the tests to check the size of the first attention layer __magic_name__ = n_head # Used in the tests to check the size of the first hidden state __magic_name__ = self.d_model # Used in the tests to check the number of output hidden states/attentions __magic_name__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). if not base: __magic_name__ = self.num_hidden_layers + 2 def _lowercase ( self : Optional[int] ) -> Dict: """simple docstring""" __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = None if self.use_input_mask: __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ = None __magic_name__ = None __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , 
attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def _lowercase ( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , ) -> Optional[Any]: """simple docstring""" __magic_name__ = TFFunnelModel(config=UpperCamelCase__ ) __magic_name__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __magic_name__ = model(UpperCamelCase__ ) __magic_name__ = [input_ids, input_mask] __magic_name__ = model(UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __magic_name__ = False __magic_name__ = TFFunnelModel(config=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __magic_name__ = False __magic_name__ = TFFunnelModel(config=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def _lowercase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , ) -> Dict: """simple docstring""" __magic_name__ = TFFunnelBaseModel(config=UpperCamelCase__ ) __magic_name__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __magic_name__ = 
model(UpperCamelCase__ ) __magic_name__ = [input_ids, input_mask] __magic_name__ = model(UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __magic_name__ = False __magic_name__ = TFFunnelBaseModel(config=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __magic_name__ = False __magic_name__ = TFFunnelBaseModel(config=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def _lowercase ( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , ) -> List[Any]: """simple docstring""" __magic_name__ = TFFunnelForPreTraining(config=UpperCamelCase__ ) __magic_name__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def _lowercase ( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , ) -> Optional[Any]: """simple docstring""" __magic_name__ = TFFunnelForMaskedLM(config=UpperCamelCase__ ) __magic_name__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , 
UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , ) -> List[str]: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = TFFunnelForSequenceClassification(config=UpperCamelCase__ ) __magic_name__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : str , ) -> Optional[int]: """simple docstring""" __magic_name__ = self.num_choices __magic_name__ = TFFunnelForMultipleChoice(config=UpperCamelCase__ ) __magic_name__ = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) __magic_name__ = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) __magic_name__ = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) ) __magic_name__ = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowercase ( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , ) -> Optional[int]: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = TFFunnelForTokenClassification(config=UpperCamelCase__ ) __magic_name__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __magic_name__ = 
model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , ) -> str: """simple docstring""" __magic_name__ = TFFunnelForQuestionAnswering(config=UpperCamelCase__ ) __magic_name__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase ( self : Optional[Any] ) -> int: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = config_and_inputs __magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class UpperCAmelCase_ ( _A , _A , unittest.TestCase ): '''simple docstring''' a__ = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) a__ = ( { """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel), """fill-mask""": TFFunnelForMaskedLM, """question-answering""": TFFunnelForQuestionAnswering, """text-classification""": TFFunnelForSequenceClassification, """token-classification""": TFFunnelForTokenClassification, """zero-shot""": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) a__ = False a__ = False def _lowercase ( self : Optional[int] ) -> str: """simple docstring""" __magic_name__ = 
TFFunnelModelTester(self ) __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ ) def _lowercase ( self : Tuple ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self : Any ) -> Union[str, Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : List[str] ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ ) def _lowercase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def _lowercase ( self : Dict ) -> int: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) def _lowercase ( self : Any ) -> Union[str, Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) @require_tf class UpperCAmelCase_ ( _A , unittest.TestCase ): '''simple docstring''' a__ = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) a__ = False a__ = False def _lowercase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __magic_name__ = TFFunnelModelTester(self , base=UpperCamelCase__ ) __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ ) def _lowercase ( self : Dict ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase__ ) def _lowercase ( 
self : List[str] ) -> Any: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def _lowercase ( self : int ) -> Dict: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )
76
import os
import sys


# Make the bundled source tree importable before pulling in transformers.
# NOTE: `SRC_DIR` was referenced but never defined in the original
# (the assignment had been renamed to a throwaway variable) -> NameError.
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

# PyTorch Hub requires this exact module-level name to declare pip
# dependencies; the original assigned the list to a throwaway variable.
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]

# Each function below is a torch.hub entry point delegating to the matching
# Auto* class. The originals were all named `a__` (shadowing each other) with
# `*A_, **A_` duplicate-parameter SyntaxErrors; distinct names restored.


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Entry point: load a pretrained model configuration."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Entry point: load a pretrained tokenizer."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Entry point: load a pretrained base model."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Entry point: load a pretrained causal language model."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Entry point: load a pretrained masked language model."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Entry point: load a pretrained sequence-classification model."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Entry point: load a pretrained question-answering model."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
76
1
import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): __lowerCAmelCase : int = yaml.safe_load( '\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n' ) __lowerCAmelCase : List[Any] = { 'name': 'root', 'text': '', 'is_empty_text': True, 'subsections': [ { 'name': 'Dataset Card for My Dataset', 'text': '', 'is_empty_text': True, 'subsections': [ {'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []}, { 'name': 'Dataset Description', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [ { 'name': 'Dataset Summary', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [], }, { 'name': 'Supported Tasks and Leaderboards', 'text': '', 'is_empty_text': True, 'subsections': [], }, {'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []}, ], }, ], } ], } __lowerCAmelCase : Any = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __lowerCAmelCase : Tuple = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of 
Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __lowerCAmelCase : str = { 'name': 'root', 'text': '', 'is_empty_text': True, 'subsections': [ { 'name': 'Dataset Card for My Dataset', 'text': '', 'is_empty_text': True, 'subsections': [ {'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []}, { 'name': 'Dataset Description', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [ { 'name': 'Dataset Summary', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [ { 'name': 'Extra Ignored Subsection', 'text': '', 'is_empty_text': True, 'subsections': [], } ], }, { 'name': 'Supported Tasks and Leaderboards', 'text': '', 'is_empty_text': True, 'subsections': [], }, {'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []}, ], }, ], } ], } __lowerCAmelCase : List[str] = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __lowerCAmelCase : List[Any] = ( 'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.' ) __lowerCAmelCase : List[Any] = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __lowerCAmelCase : Union[str, Any] = ( 'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.' 
) __lowerCAmelCase : Any = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __lowerCAmelCase : str = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.' __lowerCAmelCase : List[Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __lowerCAmelCase : Dict = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).' __lowerCAmelCase : Optional[Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n' __lowerCAmelCase : Union[str, Any] = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.' __lowerCAmelCase : Union[str, Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n' __lowerCAmelCase : List[str] = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.' 
__lowerCAmelCase : Any = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n' __lowerCAmelCase : Dict = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.' __lowerCAmelCase : Dict = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __lowerCAmelCase : Dict = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.' __lowerCAmelCase : List[str] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n' __lowerCAmelCase : str = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.' __lowerCAmelCase : Tuple = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __lowerCAmelCase : Dict = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.' 
__lowerCAmelCase : Optional[int] = '' __lowerCAmelCase : Dict = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.' __lowerCAmelCase : str = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __lowerCAmelCase : Optional[int] = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.' @pytest.mark.parametrize( """readme_md, expected_dict""", [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ], ) def a__ ( A_, A_ ): '''simple docstring''' assert ReadMe.from_string(A_, A_ ).to_dict() == expected_dict @pytest.mark.parametrize( """readme_md, expected_error""", [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ], ) def a__ ( A_, A_ ): '''simple docstring''' with pytest.raises(A_, match=re.escape(expected_error.format(path="""root""" ) ) ): 
__magic_name__ = ReadMe.from_string(A_, A_ ) readme.validate() @pytest.mark.parametrize( """readme_md, expected_error""", [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ], ) def a__ ( A_, A_ ): '''simple docstring''' with pytest.raises(A_, match=re.escape(expected_error.format(path="""root""" ) ) ): ReadMe.from_string(A_, A_ ) @pytest.mark.parametrize( """readme_md,""", [ (README_MULTIPLE_SAME_HEADING_1), ], ) def a__ ( A_ ): '''simple docstring''' ReadMe.from_string(A_, A_, suppress_parsing_errors=A_ ) @pytest.mark.parametrize( """readme_md, expected_dict""", [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ], ) def a__ ( A_, A_ ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ = Path(A_ ) / """README.md""" with open(A_, """w+""" ) as readme_file: readme_file.write(A_ ) __magic_name__ = ReadMe.from_readme(A_, A_ ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( """readme_md, expected_error""", [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ], ) def a__ ( A_, A_ ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ = Path(A_ ) / """README.md""" with 
open(A_, """w+""" ) as readme_file: readme_file.write(A_ ) __magic_name__ = expected_error.format(path=A_ ) with pytest.raises(A_, match=re.escape(A_ ) ): __magic_name__ = ReadMe.from_readme(A_, A_ ) readme.validate() @pytest.mark.parametrize( """readme_md, expected_error""", [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ], ) def a__ ( A_, A_ ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ = Path(A_ ) / """README.md""" with open(A_, """w+""" ) as readme_file: readme_file.write(A_ ) __magic_name__ = expected_error.format(path=A_ ) with pytest.raises(A_, match=re.escape(A_ ) ): ReadMe.from_readme(A_, A_ ) @pytest.mark.parametrize( """readme_md,""", [ (README_MULTIPLE_SAME_HEADING_1), ], ) def a__ ( A_ ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: __magic_name__ = Path(A_ ) / """README.md""" with open(A_, """w+""" ) as readme_file: readme_file.write(A_ ) ReadMe.from_readme(A_, A_, suppress_parsing_errors=A_ )
76
from typing import Dict

from .base import GenericTensor, Pipeline


class UpperCAmelCase_(Pipeline):
    """Feature-extraction pipeline: returns the model's raw first output.

    NOTE(review): the original base class ``_A`` was undefined in this module
    (NameError at class creation); ``Pipeline`` — already imported from
    ``.base`` — is the intended base. The four methods were all obfuscated to
    the same name ``_lowercase`` with duplicated parameter names
    (SyntaxError); the standard Pipeline hook names and the parameter names
    referenced inside each body are restored.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        """Split kwargs into (preprocess, forward, postprocess) param dicts.

        Raises:
            ValueError: if ``truncation`` is given both directly and inside
                ``tokenize_kwargs``.
        """
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        """Tokenize ``inputs`` into tensors for ``self.framework``."""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model forward pass on the tokenized inputs."""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        """Return the first model output, as a tensor or plain nested list."""
        # model_outputs[0] is the first available output (e.g. last_hidden_state).
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        """Extract features for the given input(s) via the base Pipeline."""
        return super().__call__(*args, **kwargs)
76
1
from __future__ import annotations


def a__(nums):
    """Return the arithmetic mean of the values in *nums*.

    Raises:
        ValueError: if *nums* is empty.
    """
    if nums:
        total = sum(nums)
        count = len(nums)
        return total / count
    raise ValueError("List is empty")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel


# NOTE(review): this script was machine-anonymised.  Every function is named
# `a__`, locals/constants were collapsed onto `__magic_name__` and
# `__lowerCAmelCase`, several defs declare duplicate `A_` parameters (a
# SyntaxError), and bodies still read the pre-anonymisation names
# (model_name, RES_CONV_MAP, ATTN_MAP, string, main, ...).  The module is
# therefore not runnable as-is; code kept byte-for-byte, comments only.

# Checkpoint registry: download URL plus the sample rate / clip length each
# released dance-diffusion model was trained with.
__lowerCAmelCase : str = {
    'gwf-440k': {
        'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
        'sample_rate': 48000,
        'sample_size': 65536,
    },
    'jmann-small-190k': {
        'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
        'sample_rate': 48000,
        'sample_size': 65536,
    },
    'jmann-large-580k': {
        'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
        'sample_rate': 48000,
        'sample_size': 131072,
    },
    'maestro-uncond-150k': {
        'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
        'sample_rate': 16000,
        'sample_size': 65536,
    },
    'unlocked-uncond-250k': {
        'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
        'sample_rate': 16000,
        'sample_size': 65536,
    },
    'honk-140k': {
        'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
        'sample_rate': 16000,
        'sample_size': 65536,
    },
}


def a__ ( A_, A_ ):
    '''Convert an (alpha, sigma) noise pair to a timestep in [0, 1] (presumably `atana` was `atan2` — anonymisation artefact).'''
    return torch.atana(A_, A_ ) / math.pi * 2


def a__ ( A_ ):
    '''Compute the "crash" noise schedule for the given timesteps.'''
    __magic_name__ = torch.sin(t * math.pi / 2 ) ** 2
    __magic_name__ = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(A_, A_ )


class UpperCAmelCase_ ( _A ):
    '''Empty attribute container used as a stand-in config object.'''

    pass


class UpperCAmelCase_ ( nn.Module ):
    '''Holds the unconditional diffusion U-Net, an EMA copy of it, and a Sobol RNG.'''

    def __init__( self : Tuple , UpperCamelCase__ : str ) -> Optional[Any]:
        """Build the attention U-Net plus its EMA copy."""
        super().__init__()
        __magic_name__ = DiffusionAttnUnetaD(UpperCamelCase__ , n_attn_layers=4 )
        __magic_name__ = deepcopy(self.diffusion )
        __magic_name__ = torch.quasirandom.SobolEngine(1 , scramble=UpperCamelCase__ )


def a__ ( A_ ):
    '''Fetch the named checkpoint with wget and return its local path.'''
    __magic_name__ = MODELS_MAP[model_name]["""url"""]
    os.system(f'''wget {url} ./''' )
    return f'''./{model_name}.ckpt'''


# Original-layer-number -> diffusers sub-module maps for mid/down/up blocks,
# followed by ResConvBlock and attention parameter renames.
__lowerCAmelCase : Optional[int] = {
    '1': 'resnets.0',
    '2': 'attentions.0',
    '3': 'resnets.1',
    '4': 'attentions.1',
    '5': 'resnets.2',
    '6': 'attentions.2',
}
__lowerCAmelCase : Optional[Any] = {
    '8': 'resnets.0',
    '9': 'attentions.0',
    '10': 'resnets.1',
    '11': 'attentions.1',
    '12': 'resnets.2',
    '13': 'attentions.2',
}
__lowerCAmelCase : Union[str, Any] = {
    '1': 'resnets.0',
    '2': 'attentions.0',
    '3': 'resnets.1',
    '4': 'attentions.1',
    '5': 'resnets.2',
    '6': 'attentions.2',
    '8': 'resnets.3',
    '9': 'attentions.3',
    '10': 'resnets.4',
    '11': 'attentions.4',
    '12': 'resnets.5',
    '13': 'attentions.5',
}
__lowerCAmelCase : int = {
    '0': 'resnets.0',
    '1': 'resnets.1',
    '2': 'resnets.2',
    '4': 'resnets.0',
    '5': 'resnets.1',
    '6': 'resnets.2',
}
__lowerCAmelCase : List[str] = {
    'skip': 'conv_skip',
    'main.0': 'conv_1',
    'main.1': 'group_norm_1',
    'main.3': 'conv_2',
    'main.4': 'group_norm_2',
}
__lowerCAmelCase : int = {
    'norm': 'group_norm',
    'qkv_proj': ['query', 'key', 'value'],
    'out_proj': ['proj_attn'],
}


def a__ ( A_ ):
    '''Rename one ResConvBlock parameter path to its diffusers equivalent.'''
    if name.startswith("""skip""" ):
        return name.replace("""skip""", RES_CONV_MAP["""skip"""] )
    # name has to be of format main.{digit}
    if not name.startswith("""main.""" ):
        raise ValueError(f'''ResConvBlock error with {name}''' )
    return name.replace(name[:6], RES_CONV_MAP[name[:6]] )


def a__ ( A_ ):
    '''Rename an attention parameter path; fused qkv projections fan out to a list of names.'''
    for key, value in ATTN_MAP.items():
        if name.startswith(A_ ) and not isinstance(A_, A_ ):
            return name.replace(A_, A_ )
        elif name.startswith(A_ ):
            return [name.replace(A_, A_ ) for v in value]
    raise ValueError(f'''Attn error with {name}''' )


def a__ ( A_, A_=13 ):
    '''Map one original state-dict key into the diffusers layout (depth-aware mid/down/up routing).'''
    __magic_name__ = input_string
    if string.split(""".""" )[0] == "timestep_embed":
        return string.replace("""timestep_embed""", """time_proj""" )
    __magic_name__ = 0
    if string.startswith("""net.3.""" ):
        depth += 1
        __magic_name__ = string[6:]
    elif string.startswith("""net.""" ):
        __magic_name__ = string[4:]
    while string.startswith("""main.7.""" ):
        depth += 1
        __magic_name__ = string[7:]
    if string.startswith("""main.""" ):
        __magic_name__ = string[5:]
    # mid block
    if string[:2].isdigit():
        __magic_name__ = string[:2]
        __magic_name__ = string[2:]
    else:
        __magic_name__ = string[0]
        __magic_name__ = string[1:]
    if depth == max_depth:
        __magic_name__ = MID_NUM_TO_LAYER[layer_num]
        __magic_name__ = """mid_block"""
    elif depth > 0 and int(A_ ) < 7:
        __magic_name__ = DOWN_NUM_TO_LAYER[layer_num]
        __magic_name__ = f'''down_blocks.{depth}'''
    elif depth > 0 and int(A_ ) > 7:
        __magic_name__ = UP_NUM_TO_LAYER[layer_num]
        __magic_name__ = f'''up_blocks.{max_depth - depth - 1}'''
    elif depth == 0:
        __magic_name__ = DEPTH_0_TO_LAYER[layer_num]
        __magic_name__ = f'''up_blocks.{max_depth - 1}''' if int(A_ ) > 3 else """down_blocks.0"""
    if not string_left.startswith(""".""" ):
        raise ValueError(f'''Naming error with {input_string} and string_left: {string_left}.''' )
    __magic_name__ = string_left[1:]
    if "resnets" in new_layer:
        __magic_name__ = convert_resconv_naming(A_ )
    elif "attentions" in new_layer:
        __magic_name__ = convert_attn_naming(A_ )
        __magic_name__ = new_string_left
    if not isinstance(A_, A_ ):
        __magic_name__ = prefix + """.""" + new_layer + """.""" + string_left
    else:
        __magic_name__ = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
    return new_string


def a__ ( A_ ):
    '''Rename a full state dict, converting fused attention conv weights to linear layers.'''
    __magic_name__ = {}
    for k, v in state_dict.items():
        if k.endswith("""kernel""" ):
            # up- and downsample layers, don't have trainable weights
            continue
        __magic_name__ = rename(A_ )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(A_, A_ ):
            __magic_name__ = transform_conv_attns(A_, A_, A_ )
        else:
            __magic_name__ = v
    return new_state_dict


def a__ ( A_, A_, A_ ):
    '''Split a fused qkv conv weight/bias into per-projection linear parameters.'''
    if len(A_ ) == 1:
        if len(v.shape ) == 3:
            # weight
            __magic_name__ = v[:, :, 0]
        else:
            # bias
            __magic_name__ = v
    else:
        # qkv matrices
        __magic_name__ = v.shape[0]
        __magic_name__ = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                __magic_name__ = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                __magic_name__ = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict


def a__ ( A_ ):
    '''Convert one checkpoint to diffusers format and verify generated audio against the original sampler.'''
    __magic_name__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
    __magic_name__ = args.model_path.split("""/""" )[-1].split(""".""" )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), f'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
        __magic_name__ = download(A_ )
    __magic_name__ = MODELS_MAP[model_name]["""sample_rate"""]
    __magic_name__ = MODELS_MAP[model_name]["""sample_size"""]
    __magic_name__ = Object()
    __magic_name__ = sample_size
    __magic_name__ = sample_rate
    __magic_name__ = 0
    __magic_name__ = UNetaDModel(sample_size=A_, sample_rate=A_ )
    __magic_name__ = diffusers_model.state_dict()
    __magic_name__ = DiffusionUncond(A_ )
    orig_model.load_state_dict(torch.load(args.model_path, map_location=A_ )["""state_dict"""] )
    __magic_name__ = orig_model.diffusion_ema.eval()
    __magic_name__ = orig_model.state_dict()
    __magic_name__ = rename_orig_weights(A_ )
    __magic_name__ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    __magic_name__ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(A_ ) == 0, f'''Problem with {renamed_minus_diffusers}'''
    assert all(k.endswith("""kernel""" ) for k in list(A_ ) ), f'''Problem with {diffusers_minus_renamed}'''
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
        if key == "time_proj.weight":
            __magic_name__ = value.squeeze()
        __magic_name__ = value
    diffusers_model.load_state_dict(A_ )
    __magic_name__ = 100
    __magic_name__ = 33
    __magic_name__ = IPNDMScheduler(num_train_timesteps=A_ )
    __magic_name__ = torch.manual_seed(A_ )
    __magic_name__ = torch.randn([1, 2, config.sample_size], generator=A_ ).to(A_ )
    __magic_name__ = torch.linspace(1, 0, steps + 1, device=A_ )[:-1]
    __magic_name__ = get_crash_schedule(A_ )
    __magic_name__ = DanceDiffusionPipeline(unet=A_, scheduler=A_ )
    __magic_name__ = torch.manual_seed(33 )
    __magic_name__ = pipe(num_inference_steps=A_, generator=A_ ).audios
    __magic_name__ = sampling.iplms_sample(A_, A_, A_, {} )
    __magic_name__ = generated.clamp(-1, 1 )
    __magic_name__ = (generated - audio).abs().sum()
    __magic_name__ = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print("""Diff sum""", A_ )
    print("""Diff max""", A_ )
    assert diff_max < 1e-3, f'''Diff max: {diff_max} is too much :-/'''
    print(f'''Conversion for {model_name} successful!''' )


if __name__ == "__main__":
    __lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    __lowerCAmelCase : Union[str, Any] = parser.parse_args()
    main(args)
76
1
def a__ ( text_a, text_b ):
    '''Return the longest common substring of two strings via dynamic programming.

    dp[i][j] holds the length of the longest common suffix of text_a[:i] and
    text_b[:j]; the best (end index, length) pair is tracked while filling.

    Raises:
        ValueError: if either argument is not a string.

    >>> a__("abcdxyz", "xyzabcd")
    'abcd'
    '''
    # The original signature declared the same name for both parameters
    # (`def a__(A_, A_)`) — a SyntaxError in Python; distinct parameter names
    # restore a callable function.
    if not (isinstance(text_a, str ) and isinstance(text_b, str )):
        raise ValueError("""longest_common_substring() takes two strings for inputs""" )
    len_a = len(text_a )
    len_b = len(text_b )
    dp = [[0] * (len_b + 1) for _ in range(len_a + 1 )]
    ans_index = 0
    ans_length = 0
    for i in range(1, len_a + 1 ):
        for j in range(1, len_b + 1 ):
            if text_a[i - 1] == text_b[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text_a[ans_index - ans_length : ans_index]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowerCAmelCase : Tuple = logging.get_logger(__name__)

# Pretrained checkpoint name -> config URL.
__lowerCAmelCase : Tuple = {
    'SCUT-DLVCLab/lilt-roberta-en-base': (
        'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
    ),
}


# NOTE(review): this config class was machine-anonymised — `__init__` declares
# the same parameter name many times (a SyntaxError) and every attribute
# assignment targets the single name `__magic_name__` instead of the distinct
# config attributes of the original.  Code kept byte-for-byte; comments only.
class UpperCAmelCase_ ( _A ):
    '''Configuration for LiLT (Language-Independent Layout Transformer) models.'''

    a__ = """lilt"""  # model_type identifier

    def __init__( self : Dict , UpperCamelCase__ : List[str]=3_0522 , UpperCamelCase__ : Optional[Any]=768 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Dict=3072 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Union[str, Any]=512 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : str="absolute" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Tuple=1024 , **UpperCamelCase__ : Optional[int] , ) -> Dict:
        """Store the standard transformer hyper-parameters plus LiLT's channel_shrink_ratio and 2-D position size."""
        super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
        __magic_name__ = vocab_size
        __magic_name__ = hidden_size
        __magic_name__ = num_hidden_layers
        __magic_name__ = num_attention_heads
        __magic_name__ = hidden_act
        __magic_name__ = intermediate_size
        __magic_name__ = hidden_dropout_prob
        __magic_name__ = attention_probs_dropout_prob
        __magic_name__ = max_position_embeddings
        __magic_name__ = type_vocab_size
        __magic_name__ = initializer_range
        __magic_name__ = layer_norm_eps
        __magic_name__ = position_embedding_type
        __magic_name__ = classifier_dropout
        __magic_name__ = channel_shrink_ratio
        __magic_name__ = max_ad_position_embeddings
76
1
__lowerCAmelCase : Tuple = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def a__ ( A_ ): '''simple docstring''' if not isinstance(A_, A_ ): __magic_name__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(A_ ) __magic_name__ = """""".join(bin(A_ )[2:].zfill(8 ) for byte in data ) __magic_name__ = len(A_ ) % 6 != 0 if padding_needed: # The padding that will be added later __magic_name__ = b"""=""" * ((6 - len(A_ ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(A_ ) % 6) else: __magic_name__ = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6], 2 )] for index in range(0, len(A_ ), 6 ) ).encode() + padding ) def a__ ( A_ ): '''simple docstring''' if not isinstance(A_, A_ ) and not isinstance(A_, A_ ): __magic_name__ = ( """argument should be a bytes-like object or ASCII string, """ f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(A_ ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(A_, A_ ): try: __magic_name__ = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) __magic_name__ = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." 
# Check the padding assert len(A_ ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __magic_name__ = encoded_data[:-padding] __magic_name__ = """""".join( bin(B64_CHARSET.index(A_ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __magic_name__ = """""".join( bin(B64_CHARSET.index(A_ ) )[2:].zfill(6 ) for char in encoded_data ) __magic_name__ = [ int(binary_stream[index : index + 8], 2 ) for index in range(0, len(A_ ), 8 ) ] return bytes(A_ ) if __name__ == "__main__": import doctest doctest.testmod()
76
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


# NOTE(review): machine-anonymised — locals are collapsed onto
# `__magic_name__`, and the assertions read names (feat_extract, obj,
# feat_extract_first, feat_extract_second) those assignments no longer bind.
# Code kept byte-for-byte; comments/docstrings only.
class UpperCAmelCase_ :
    '''Serialization round-trip tests for feature extractors; subclasses supply `feature_extraction_class` and `feat_extract_dict`.'''

    a__ = None  # placeholder for the concrete feature-extraction class

    def _lowercase ( self : Optional[int] ) -> str:
        """to_json_string round-trip: every constructor kwarg must survive JSON serialization."""
        __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )
        __magic_name__ = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , UpperCamelCase__ )

    def _lowercase ( self : Union[str, Any] ) -> str:
        """to_json_file / from_json_file round-trip must preserve the config dict."""
        __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __magic_name__ = os.path.join(UpperCamelCase__ , """feat_extract.json""" )
            feat_extract_first.to_json_file(UpperCamelCase__ )
            __magic_name__ = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def _lowercase ( self : str ) -> str:
        """save_pretrained / from_pretrained round-trip must write valid JSON and preserve the config dict."""
        __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __magic_name__ = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
            check_json_file_has_correct_format(UpperCamelCase__ )
            __magic_name__ = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def _lowercase ( self : Optional[int] ) -> Tuple:
        """The extractor must be constructible with no arguments."""
        __magic_name__ = self.feature_extraction_class()
        self.assertIsNotNone(UpperCamelCase__ )
76
1
import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechTaHifiGan,
    SpeechTaHifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


# NOTE(review): this test module was machine-anonymised — class attributes are
# all assigned to the single name `a__`, locals to `__magic_name__`, and the
# method bodies read names (inputs, audioldm_pipe, output, audio, ...) those
# assignments no longer bind.  Code kept byte-for-byte; comments/docstrings
# only.
class UpperCAmelCase_ ( _A , unittest.TestCase ):
    '''Fast CPU tests for AudioLDMPipeline built from tiny randomly-initialised components.'''

    a__ = AudioLDMPipeline
    a__ = TEXT_TO_AUDIO_PARAMS
    a__ = TEXT_TO_AUDIO_BATCH_PARAMS
    a__ = frozenset(
        [
            """num_inference_steps""",
            """num_waveforms_per_prompt""",
            """generator""",
            """latents""",
            """output_type""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )

    def _lowercase ( self : List[str] ) -> List[Any]:
        """Build the dict of tiny pipeline components (unet, scheduler, vae, CLAP text encoder, tokenizer, vocoder)."""
        torch.manual_seed(0 )
        __magic_name__ = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=UpperCamelCase__ , )
        __magic_name__ = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
        torch.manual_seed(0 )
        __magic_name__ = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        __magic_name__ = ClapTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
        __magic_name__ = ClapTextModelWithProjection(UpperCamelCase__ )
        __magic_name__ = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
        __magic_name__ = SpeechTaHifiGanConfig(
            model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=UpperCamelCase__ , )
        __magic_name__ = SpeechTaHifiGan(UpperCamelCase__ )
        __magic_name__ = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """vocoder""": vocoder,
        }
        return components

    def _lowercase ( self : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int]=0 ) -> Optional[int]:
        """Deterministic call kwargs: seeded generator plus a fixed prompt."""
        if str(UpperCamelCase__ ).startswith("""mps""" ):
            __magic_name__ = torch.manual_seed(UpperCamelCase__ )
        else:
            __magic_name__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
        __magic_name__ = {
            """prompt""": """A hammer hitting a wooden surface""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
        }
        return inputs

    def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
        """Smoke test: waveform length and a 10-sample slice must match the reference values."""
        __magic_name__ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        __magic_name__ = self.get_dummy_components()
        __magic_name__ = AudioLDMPipeline(**UpperCamelCase__ )
        __magic_name__ = audioldm_pipe.to(UpperCamelCase__ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        __magic_name__ = self.get_dummy_inputs(UpperCamelCase__ )
        __magic_name__ = audioldm_pipe(**UpperCamelCase__ )
        __magic_name__ = output.audios[0]
        assert audio.ndim == 1
        assert len(UpperCamelCase__ ) == 256
        __magic_name__ = audio[:10]
        __magic_name__ = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
        assert np.abs(audio_slice - expected_slice ).max() < 1E-2

    def _lowercase ( self : str ) -> Any:
        """Manually encoded + L2-normalised prompt_embeds must reproduce the plain-prompt output."""
        __magic_name__ = self.get_dummy_components()
        __magic_name__ = AudioLDMPipeline(**UpperCamelCase__ )
        __magic_name__ = audioldm_pipe.to(UpperCamelCase__ )
        __magic_name__ = audioldm_pipe.to(UpperCamelCase__ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        __magic_name__ = self.get_dummy_inputs(UpperCamelCase__ )
        __magic_name__ = 3 * [inputs["""prompt"""]]
        # forward
        __magic_name__ = audioldm_pipe(**UpperCamelCase__ )
        __magic_name__ = output.audios[0]
        __magic_name__ = self.get_dummy_inputs(UpperCamelCase__ )
        __magic_name__ = 3 * [inputs.pop("""prompt""" )]
        __magic_name__ = audioldm_pipe.tokenizer(
            UpperCamelCase__ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="""pt""" , )
        __magic_name__ = text_inputs["""input_ids"""].to(UpperCamelCase__ )
        __magic_name__ = audioldm_pipe.text_encoder(
            UpperCamelCase__ , )
        __magic_name__ = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        __magic_name__ = F.normalize(UpperCamelCase__ , dim=-1 )
        __magic_name__ = prompt_embeds
        # forward
        __magic_name__ = audioldm_pipe(**UpperCamelCase__ )
        __magic_name__ = output.audios[0]
        assert np.abs(audio_a - audio_a ).max() < 1E-2

    def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
        """As above, additionally supplying explicit negative_prompt_embeds."""
        __magic_name__ = self.get_dummy_components()
        __magic_name__ = AudioLDMPipeline(**UpperCamelCase__ )
        __magic_name__ = audioldm_pipe.to(UpperCamelCase__ )
        __magic_name__ = audioldm_pipe.to(UpperCamelCase__ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        __magic_name__ = self.get_dummy_inputs(UpperCamelCase__ )
        __magic_name__ = 3 * ["""this is a negative prompt"""]
        __magic_name__ = negative_prompt
        __magic_name__ = 3 * [inputs["""prompt"""]]
        # forward
        __magic_name__ = audioldm_pipe(**UpperCamelCase__ )
        __magic_name__ = output.audios[0]
        __magic_name__ = self.get_dummy_inputs(UpperCamelCase__ )
        __magic_name__ = 3 * [inputs.pop("""prompt""" )]
        __magic_name__ = []
        for p in [prompt, negative_prompt]:
            __magic_name__ = audioldm_pipe.tokenizer(
                UpperCamelCase__ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="""pt""" , )
            __magic_name__ = text_inputs["""input_ids"""].to(UpperCamelCase__ )
            __magic_name__ = audioldm_pipe.text_encoder(
                UpperCamelCase__ , )
            __magic_name__ = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            __magic_name__ = F.normalize(UpperCamelCase__ , dim=-1 )
            embeds.append(UpperCamelCase__ )
        __magic_name__ , __magic_name__ = embeds
        # forward
        __magic_name__ = audioldm_pipe(**UpperCamelCase__ )
        __magic_name__ = output.audios[0]
        assert np.abs(audio_a - audio_a ).max() < 1E-2

    def _lowercase ( self : int ) -> Optional[Any]:
        """A plain-string negative prompt must still yield the reference waveform slice (PNDM scheduler)."""
        __magic_name__ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        __magic_name__ = self.get_dummy_components()
        __magic_name__ = PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
        __magic_name__ = AudioLDMPipeline(**UpperCamelCase__ )
        __magic_name__ = audioldm_pipe.to(UpperCamelCase__ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        __magic_name__ = self.get_dummy_inputs(UpperCamelCase__ )
        __magic_name__ = """egg cracking"""
        __magic_name__ = audioldm_pipe(**UpperCamelCase__ , negative_prompt=UpperCamelCase__ )
        __magic_name__ = output.audios[0]
        assert audio.ndim == 1
        assert len(UpperCamelCase__ ) == 256
        __magic_name__ = audio[:10]
        __magic_name__ = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
        assert np.abs(audio_slice - expected_slice ).max() < 1E-2

    def _lowercase ( self : Optional[Any] ) -> int:
        """num_waveforms_per_prompt must multiply the batch dimension for single and batched prompts."""
        __magic_name__ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        __magic_name__ = self.get_dummy_components()
        __magic_name__ = PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
        __magic_name__ = AudioLDMPipeline(**UpperCamelCase__ )
        __magic_name__ = audioldm_pipe.to(UpperCamelCase__ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        __magic_name__ = """A hammer hitting a wooden surface"""
        # test num_waveforms_per_prompt=1 (default)
        __magic_name__ = audioldm_pipe(UpperCamelCase__ , num_inference_steps=2 ).audios
        assert audios.shape == (1, 256)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        __magic_name__ = 2
        __magic_name__ = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
        assert audios.shape == (batch_size, 256)
        # test num_waveforms_per_prompt for single prompt
        __magic_name__ = 2
        __magic_name__ = audioldm_pipe(UpperCamelCase__ , num_inference_steps=2 , num_waveforms_per_prompt=UpperCamelCase__ ).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)
        # test num_waveforms_per_prompt for batch of prompts
        __magic_name__ = 2
        __magic_name__ = audioldm_pipe(
            [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=UpperCamelCase__ ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def _lowercase ( self : Dict ) -> Optional[Any]:
        """audio_length_in_s must control the emitted waveform duration."""
        __magic_name__ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        __magic_name__ = self.get_dummy_components()
        __magic_name__ = AudioLDMPipeline(**UpperCamelCase__ )
        __magic_name__ = audioldm_pipe.to(UpperCamelCase__ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        __magic_name__ = audioldm_pipe.vocoder.config.sampling_rate
        __magic_name__ = self.get_dummy_inputs(UpperCamelCase__ )
        __magic_name__ = audioldm_pipe(audio_length_in_s=0.016 , **UpperCamelCase__ )
        __magic_name__ = output.audios[0]
        assert audio.ndim == 1
        assert len(UpperCamelCase__ ) / vocoder_sampling_rate == 0.016
        __magic_name__ = audioldm_pipe(audio_length_in_s=0.032 , **UpperCamelCase__ )
        __magic_name__ = output.audios[0]
        assert audio.ndim == 1
        assert len(UpperCamelCase__ ) / vocoder_sampling_rate == 0.032

    def _lowercase ( self : Optional[Any] ) -> int:
        """Swapping in a vocoder with doubled model_in_dim must keep the waveform shape unchanged."""
        __magic_name__ = self.get_dummy_components()
        __magic_name__ = AudioLDMPipeline(**UpperCamelCase__ )
        __magic_name__ = audioldm_pipe.to(UpperCamelCase__ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        __magic_name__ = ["""hey"""]
        __magic_name__ = audioldm_pipe(UpperCamelCase__ , num_inference_steps=1 )
        __magic_name__ = output.audios.shape
        assert audio_shape == (1, 256)
        __magic_name__ = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        __magic_name__ = SpeechTaHifiGan(UpperCamelCase__ ).to(UpperCamelCase__ )
        __magic_name__ = audioldm_pipe(UpperCamelCase__ , num_inference_steps=1 )
        __magic_name__ = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def _lowercase ( self : Dict ) -> Dict:
        """Delegate to the shared attention-slicing equivalence check."""
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCamelCase__ )

    def _lowercase ( self : List[Any] ) -> str:
        """Delegate to the shared single-vs-batched inference equivalence check."""
        self._test_inference_batch_single_identical(test_mean_pixel_difference=UpperCamelCase__ )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() ,
        reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def _lowercase ( self : Dict ) -> Optional[Any]:
        """Delegate to the shared xFormers-attention equivalence check (CUDA only)."""
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCamelCase__ )


@slow
class UpperCAmelCase_ ( unittest.TestCase ):
    '''Slow integration tests against the full `cvssp/audioldm` checkpoint.'''

    def _lowercase ( self : Tuple ) -> int:
        """Free CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowercase ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple="cpu" , UpperCamelCase__ : Dict=torch.floataa , UpperCamelCase__ : Any=0 ) -> str:
        """Deterministic call kwargs with pre-generated latents of shape (1, 8, 128, 16)."""
        __magic_name__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
        __magic_name__ = np.random.RandomState(UpperCamelCase__ ).standard_normal((1, 8, 128, 16) )
        __magic_name__ = torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ )
        __magic_name__ = {
            """prompt""": """A hammer hitting a wooden surface""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 2.5,
        }
        return inputs

    def _lowercase ( self : List[Any] ) -> int:
        """Full-checkpoint generation (25 steps) must match a reference waveform slice."""
        __magic_name__ = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
        __magic_name__ = audioldm_pipe.to(UpperCamelCase__ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        __magic_name__ = self.get_inputs(UpperCamelCase__ )
        __magic_name__ = 25
        __magic_name__ = audioldm_pipe(**UpperCamelCase__ ).audios[0]
        assert audio.ndim == 1
        assert len(UpperCamelCase__ ) == 8_1920
        __magic_name__ = audio[7_7230:7_7240]
        __magic_name__ = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
        __magic_name__ = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 1E-2

    def _lowercase ( self : str ) -> Tuple:
        """Full-checkpoint generation with the LMS scheduler must match its reference slice."""
        __magic_name__ = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
        __magic_name__ = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
        __magic_name__ = audioldm_pipe.to(UpperCamelCase__ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        __magic_name__ = self.get_inputs(UpperCamelCase__ )
        __magic_name__ = audioldm_pipe(**UpperCamelCase__ ).audios[0]
        assert audio.ndim == 1
        assert len(UpperCamelCase__ ) == 8_1920
        __magic_name__ = audio[2_7780:2_7790]
        __magic_name__ = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
        __magic_name__ = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 3E-2
76
from ..utils import DummyObject, requires_backends


class UpperCAmelCase_ ( metaclass=_A ):
    '''Import-guard placeholder standing in for a class that needs the `note_seq` backend; every entry point raises an informative error instead.'''

    a__ = ["""note_seq"""]  # backend(s) required before the real class can be used

    def __init__( self : Any , *UpperCamelCase__ : str , **UpperCamelCase__ : List[Any] ) -> Optional[int]:
        """Raise unless the `note_seq` backend is installed."""
        requires_backends(self , ["""note_seq"""] )

    @classmethod
    def _lowercase ( cls : str , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Tuple ) -> Dict:
        """Class-level guard: raise unless `note_seq` is installed."""
        requires_backends(cls , ["""note_seq"""] )

    @classmethod
    def _lowercase ( cls : List[str] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple ) -> int:
        """Class-level guard: raise unless `note_seq` is installed."""
        requires_backends(cls , ["""note_seq"""] )
76
1
import os
from pathlib import Path


def a__ ( ):
    '''Compile and load the MultiScaleDeformableAttention C++/CUDA extension and return the module.

    The original body referenced the undefined names `A_` and `root` (this
    function takes no arguments); per the upstream deformable-DETR kernel
    loader, the source root is derived from this file's location and
    `with_cuda` is enabled.
    '''
    from torch.utils.cpp_extension import load

    # Kernel sources live at <repo>/kernels/deformable_detr, three levels up
    # from this file.
    root = Path(__file__ ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
    src_files = [
        root / filename
        for filename in [
            """vision.cpp""",
            os.path.join("""cpu""", """ms_deform_attn_cpu.cpp""" ),
            os.path.join("""cuda""", """ms_deform_attn_cuda.cu""" ),
        ]
    ]

    load(
        """MultiScaleDeformableAttention""",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root )],
        extra_cflags=["""-DWITH_CUDA=1"""],
        extra_cuda_cflags=[
            """-DCUDA_HAS_FP16=1""",
            """-D__CUDA_NO_HALF_OPERATORS__""",
            """-D__CUDA_NO_HALF_CONVERSIONS__""",
            """-D__CUDA_NO_HALF2_OPERATORS__""",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
76
def a__ ( A_ ):
    '''Return the sentence with every word longer than four characters reversed.

    The original comprehension measured `len(A_)` (the whole sentence) and
    iterated `sentence.split()` / printed via `reverse_long_words` — all
    undefined after anonymisation; every reference must use the words of the
    input parameter.

    >>> a__('Hey wollef sroirraw')
    'Hey fellow warriors'
    '''
    return " ".join(
        """""".join(word[::-1] ) if len(word ) > 4 else word for word in A_.split() )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(a__('Hey wollef sroirraw'))
76
1
import argparse
import datetime


def a__(A_):
    """Return the day of the week for a date string via Zeller's congruence.

    Parameters
    ----------
    A_ : str
        Date in ``mm-dd-yyyy`` or ``mm/dd/yyyy`` form (exactly 10 chars).

    Returns
    -------
    str
        A sentence naming the weekday, e.g.
        ``"Your date 01-01-2010, is a Friday!"``.

    Raises
    ------
    ValueError
        If the length, month, day, year range or a separator is invalid.
    AssertionError
        If Zeller's result disagrees with ``datetime`` (internal check).

    Fixes vs. the original: every local was assigned to ``__magic_name__``
    while later lines read the intended names (``days``, ``m``, ``y`` …),
    so the function raised NameError on first call; the ``__main__`` block
    also called an undefined ``zeller``.
    """
    # Zeller's congruence yields 0 == Sunday ... 6 == Saturday.
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    # Map datetime.weekday() (0 == Monday) onto Zeller's numbering above.
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate overall length (mm-dd-yyyy is exactly 10 characters).
    if not 0 < len(A_) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(A_[0] + A_[1])
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_a = A_[2]
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(A_[3] + A_[4])
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_b = A_[5]
    if sep_b not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(A_[6] + A_[7] + A_[8] + A_[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Datetime object used only to cross-check the result below.
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Zeller treats January/February as months 13/14 of the previous year.
    if m <= 2:
        y = y - 1
        m = m + 12

    c = int(str(y)[:2])  # century part of the (possibly adjusted) year
    k = int(str(y)[2:])  # year within the century
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)

    # Cross-check against datetime before answering.
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    response = f"Your date {A_}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    print(a__(args.date_input))
76
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class UpperCAmelCase_(_A, unittest.TestCase):
    """Tokenizer tests for Funnel's slow and fast tokenizers.

    NOTE(review): identifiers in this block look mechanically renamed:
    the mixin base is written ``_A`` (undefined here; presumably
    ``TokenizerTesterMixin``, which the import above otherwise leaves
    unused), four class attributes all share the name ``a__`` (only the
    last assignment survives), every method shares the name
    ``_lowercase`` (later defs shadow earlier ones), and locals are bound
    to ``__magic_name__`` while subsequent lines read the intended names
    (``vocab_tokens``, ``self.vocab_file``, ``tokenizer``, ``inputs`` …),
    which would raise NameError/AttributeError at runtime. Code left
    byte-identical; reconcile against the upstream Funnel test file.
    """

    # Four class-level settings collapsed onto one name (see NOTE above).
    a__ = FunnelTokenizer
    a__ = FunnelTokenizerFast
    a__ = True
    a__ = True

    def _lowercase(self: List[Any]) -> str:
        """Set up: write a small WordPiece vocabulary into the temp dir."""
        super().setUp()
        __magic_name__ = [
            """<unk>""",
            """<cls>""",
            """<sep>""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        # NOTE(review): assignment targets lost — `vocab_tokens` and
        # `self.vocab_file` read below are never actually bound.
        __magic_name__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))

    def _lowercase(self: Dict, **UpperCamelCase__: Tuple) -> Union[str, Any]:
        """Build a slow FunnelTokenizer from the temp-dir vocab."""
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **UpperCamelCase__)

    def _lowercase(self: str, **UpperCamelCase__: str) -> List[str]:
        """Build a fast FunnelTokenizerFast from the temp-dir vocab."""
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **UpperCamelCase__)

    def _lowercase(self: List[str], UpperCamelCase__: str) -> List[Any]:
        """Return an (input_text, output_text) pair for round-trip checks."""
        __magic_name__ = """UNwant\u00E9d,running"""
        __magic_name__ = """unwanted, running"""
        return input_text, output_text

    def _lowercase(self: Union[str, Any]) -> Tuple:
        """Tokenize a sample and check both the tokens and their vocab ids."""
        __magic_name__ = self.tokenizer_class(self.vocab_file)
        __magic_name__ = tokenizer.tokenize("""UNwant\u00E9d,running""")
        self.assertListEqual(UpperCamelCase__, ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__), [7, 4, 5, 10, 8, 9])

    def _lowercase(self: str) -> List[Any]:
        """Funnel marks the leading [CLS] with token_type_id 2; verify the pattern."""
        __magic_name__ = self.get_tokenizers(do_lower_case=UpperCamelCase__)
        for tokenizer in tokenizers:
            __magic_name__ = tokenizer("""UNwant\u00E9d,running""")
            __magic_name__ = len(inputs["""input_ids"""]) - 1
            self.assertListEqual(inputs["""token_type_ids"""], [2] + [0] * sentence_len)
            # Sentence-pair case: second segment gets token_type_id 1.
            __magic_name__ = tokenizer("""UNwant\u00E9d,running""", """UNwant\u00E9d,running""")
            self.assertListEqual(inputs["""token_type_ids"""], [2] + [0] * sentence_len + [1] * sentence_len)
76
1
def a__(A_=1000000):
    """Project Euler 135: count ``n`` up to *A_* with exactly ten solutions.

    Counts how many ``n <= A_`` admit exactly ten distinct positive-integer
    solutions of ``x**2 - y**2 - z**2 == n`` where ``x, y, z`` form a
    decreasing arithmetic progression.

    Writing the progression as ``y + d, y, y - d`` gives
    ``n == y * (4*d - y)``, so each divisor ``y`` of ``n`` yields one
    solution iff ``y + n // y`` is divisible by 4 (making ``d`` integral)
    and ``d < y < 4*d`` (keeping ``z`` and ``n`` positive).

    Fixes vs. the original: every local was assigned to ``__magic_name__``
    while later lines read ``limit``/``frequency`` (NameError on first
    call), the inner ``range`` used the same argument three times, the
    ``__main__`` block called an undefined ``solution``, and exact float
    division is replaced by integer arithmetic.
    """
    limit = A_ + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        # n runs over the multiples of first_term, so first_term divides
        # every n visited and the integer division below is exact.
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n // first_term
            if common_difference % 4:
                # (y + n // y) must be divisible by 4 for d to be an integer.
                continue
            common_difference //= 4
            if first_term > common_difference and first_term < 4 * common_difference:
                # Positivity constraints: z = y - d > 0 and y < 4d.
                frequency[n] += 1
    return sum(1 for x in frequency[1:limit] if x == 10)


if __name__ == "__main__":
    print(f"{a__() = }")
76
from collections import deque

from .hash_table import HashTable


class UpperCAmelCase_(_A):
    """Hash table variant whose buckets are deques (separate chaining).

    NOTE(review): this block appears mechanically renamed: the base class
    is written ``_A`` (undefined here; presumably ``HashTable``, which the
    import above otherwise leaves unused), three methods share the name
    ``_lowercase`` (later defs shadow earlier ones), two methods declare
    *two* parameters named ``UpperCamelCase__`` (a SyntaxError in Python),
    and locals are bound to ``__magic_name__`` while following lines read
    other names (``key``, ``slot``). Code left byte-identical; reconcile
    with the upstream hash-table-with-linked-list implementation.
    """

    def __init__(self: int, *UpperCamelCase__: Union[str, Any], **UpperCamelCase__: Optional[Any]) -> Optional[Any]:
        """Delegate construction to the base hash table."""
        super().__init__(*UpperCamelCase__, **UpperCamelCase__)

    def _lowercase(self: Optional[Any], UpperCamelCase__: Optional[int], UpperCamelCase__: Any) -> Dict:
        """Prepend a value onto the deque bucket stored under *key*."""
        # NOTE(review): the fresh deque is bound to a lost name, so a
        # `None` slot would make the next line fail — confirm upstream.
        __magic_name__ = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(UpperCamelCase__)
        __magic_name__ = self.values[key]

    def _lowercase(self: List[str]) -> int:
        """Average remaining capacity per bucket (load-balance metric)."""
        return (
            sum(self.charge_factor - len(UpperCamelCase__) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _lowercase(self: Optional[Any], UpperCamelCase__: str, UpperCamelCase__: Tuple = None) -> str:
        """Keep *key* while its bucket has room; else defer to the base resolution."""
        if not (
            len(self.values[key]) == self.charge_factor
            and self.values.count(UpperCamelCase__) == 0
        ):
            return key
        return super()._collision_resolution(UpperCamelCase__, UpperCamelCase__)
76
1
import baseaa


def a__(A_):
    """Encode the string *A_* as UTF-8 and return its encoded bytes.

    Fixes vs. the original: the body referenced an undefined name
    ``string`` instead of the parameter, raising NameError on first call.

    NOTE(review): module and attribute names here look mechanically
    renamed (``baseaa`` is presumably ``base64`` with its b85 helpers);
    kept as-is so existing callers still resolve.
    """
    return baseaa.aaaencode(A_.encode("utf-8"))


def a__(A_):  # noqa: F811 — second definition shadows the first, as in the original
    """Decode encoded bytes *A_* back into a UTF-8 string."""
    return baseaa.aaadecode(A_).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_, AssumeRolePolicyDocument=json.dumps(A_, indent=2 ) ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", 
"""logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(A_, indent=2 ), ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'''role {role_name} already exists. Using existing one''' ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def a__ ( ): '''simple docstring''' __magic_name__ = _ask_options( """How do you want to authorize?""", ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """], A_, ) __magic_name__ = None if credentials_configuration == 0: __magic_name__ = _ask_field("""Enter your AWS Profile name: [default] """, default="""default""" ) __magic_name__ = aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __magic_name__ = _ask_field("""AWS Access Key ID: """ ) __magic_name__ = aws_access_key_id __magic_name__ = _ask_field("""AWS Secret Access Key: """ ) __magic_name__ = aws_secret_access_key __magic_name__ = _ask_field("""Enter your AWS Region: [us-east-1]""", default="""us-east-1""" ) __magic_name__ = aws_region __magic_name__ = _ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""", ["""Provide IAM Role name""", """Create new IAM role using credentials"""], A_, ) if role_management == 0: __magic_name__ = _ask_field("""Enter your IAM role name: """ ) else: __magic_name__ = """accelerate_sagemaker_execution_role""" print(f'''Accelerate will create an iam role "{iam_role_name}" 
using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __magic_name__ = _ask_field( """Do you want to use custom Docker image? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_custom_docker_image: __magic_name__ = _ask_field("""Enter your Docker image: """, lambda A_ : str(A_ ).lower() ) __magic_name__ = _ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_inputs_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_metrics_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_options( """What is the distributed mode?""", ["""No distributed training""", """Data parallelism"""], _convert_sagemaker_distributed_mode, ) __magic_name__ = {} __magic_name__ = _ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""", _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_dynamo: __magic_name__ = """dynamo_""" __magic_name__ = _ask_options( """Which dynamo backend would you like to use?""", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) __magic_name__ = _ask_field( """Do you want to customize the defaults sent to torch.compile? 
[yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_custom_options: __magic_name__ = _ask_options( """Which mode do you want to use?""", A_, lambda A_ : TORCH_DYNAMO_MODES[int(A_ )], default="""default""", ) __magic_name__ = _ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = _ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = """Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __magic_name__ = _ask_options( A_, A_, lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __magic_name__ = _ask_field(A_, lambda A_ : str(A_ ).lower(), default="""ml.p3.2xlarge""" ) __magic_name__ = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __magic_name__ = _ask_field( """How many machines do you want use? [1]: """, A_, default=1, ) __magic_name__ = _ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""", ["""no""", """fp16""", """bf16""", """fp8"""], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=A_, use_cpu=A_, dynamo_config=A_, eca_instance_type=A_, profile=A_, region=A_, iam_role_name=A_, mixed_precision=A_, num_machines=A_, sagemaker_inputs_file=A_, sagemaker_metrics_file=A_, )
76
1
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any=13 , UpperCamelCase__ : str=7 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Any=True , UpperCamelCase__ : Dict=99 , UpperCamelCase__ : Any=32 , UpperCamelCase__ : List[str]=5 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : List[Any]=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Tuple=512 , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : List[str]=4 , ) -> List[Any]: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_attention_mask __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range __magic_name__ = 
num_choices def _lowercase ( self : Tuple ) -> str: """simple docstring""" __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = None if self.use_attention_mask: __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowercase ( self : int ) -> Optional[int]: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs __magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class UpperCAmelCase_ ( _A , unittest.TestCase ): '''simple docstring''' a__ = True a__ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = FlaxRoFormerModelTester(self ) @slow def _lowercase ( self : str ) -> Optional[int]: """simple docstring""" for model_class_name in self.all_model_classes: __magic_name__ = 
model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=UpperCamelCase__ ) __magic_name__ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCamelCase__ ) @require_flax class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _lowercase ( self : Any ) -> Any: """simple docstring""" __magic_name__ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) __magic_name__ = jnp.array([[0, 1, 2, 3, 4, 5]] ) __magic_name__ = model(UpperCamelCase__ )[0] __magic_name__ = 5_0000 __magic_name__ = (1, 6, vocab_size) self.assertEqual(output.shape , UpperCamelCase__ ) __magic_name__ = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
76
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __lowerCAmelCase : Dict = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : Optional[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = True , **UpperCamelCase__ : int , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ , param_name="""crop_size""" ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else 
OPENAI_CLIP_MEAN __magic_name__ = image_std if image_std is not None else OPENAI_CLIP_STD __magic_name__ = do_convert_rgb def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ) -> Optional[int]: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ) -> PIL.Image.Image: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample 
__magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __magic_name__ = [convert_to_rgb(UpperCamelCase__ ) for image in images] # All transformations expect numpy arrays. 
__magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
1
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed __lowerCAmelCase : Tuple = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F'''{bindir}/../../examples/pytorch/translation'''): from run_translation import main # noqa set_seed(42) __lowerCAmelCase : Union[str, Any] = "sshleifer/student_marian_en_ro_6_1" __lowerCAmelCase : str = "sshleifer/tiny-mbart" @require_torch class UpperCAmelCase_ ( _lowerCamelCase ): '''simple docstring''' def _lowercase ( self : List[Any] , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Any=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Optional[int]=True , ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.run_trainer( eval_steps=1 , max_len=12 , model_name=A__ , num_train_epochs=1 , distributed=A__ , extra_args_str=A__ , predict_with_generate=A__ , do_train=A__ , do_eval=A__ , do_predict=A__ , ) __magic_name__ = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history if not do_eval: return __magic_name__ = [log for log in logs if """eval_loss""" in log.keys()] __magic_name__ = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats __magic_name__ = eval_metrics[-1] assert isinstance(last_step_stats["""eval_bleu"""] , A__ ) assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`" 
# --- continued methods of the seq2seq quick-test class above ---
# NOTE(review): `A__` is undefined throughout this chunk; it appears to be a
# mangled flag/name (often True/False or a local result) -- confirm against
# the original file before running.

    @require_torch_non_multi_gpu
    def _lowercase ( self : str ) -> Optional[int]:
        """Quick run on a single device."""
        self.run_seqaseq_quick()

    @require_torch_multi_gpu
    def _lowercase ( self : List[Any] ) -> Optional[Any]:
        """Quick distributed run."""
        self.run_seqaseq_quick(distributed=A__ )

    @require_torch_multi_gpu
    def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
        """Quick distributed run (second variant)."""
        self.run_seqaseq_quick(distributed=A__ )

    @unittest.skip("""Requires an update of the env running those tests""" )
    @require_torch_multi_gpu
    @require_fairscale
    def _lowercase ( self : Union[str, Any] ) -> Any:
        """Quick run with fairscale sharded DDP (simple)."""
        self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple""" )

    @unittest.skip("""Requires an update of the env running those tests""" )
    @require_torch_multi_gpu
    @require_fairscale
    def _lowercase ( self : int ) -> Tuple:
        """Quick run with fairscale sharded DDP (simple) + fp16."""
        self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp simple --fp16""" )

    @unittest.skip("""Requires an update of the env running those tests""" )
    @require_torch_multi_gpu
    @require_fairscale
    def _lowercase ( self : int ) -> str:
        """Quick run with fairscale zero_dp_2."""
        self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=A__ )

    @unittest.skip("""Requires an update of the env running those tests""" )
    @require_torch_multi_gpu
    @require_fairscale
    def _lowercase ( self : Any ) -> List[str]:
        """Quick run with fairscale zero_dp_2 + fp16."""
        self.run_seqaseq_quick(
            distributed=A__ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=A__ )

    @require_apex
    @require_torch_gpu
    def _lowercase ( self : Dict ) -> Optional[Any]:
        """Quick run with the apex fp16 backend; run twice to catch a past
        NaN-eval_loss regression."""
        self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=A__ , extra_args_str="""--fp16 --fp16_backend=apex""" )

    @parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
    @require_torch_multi_gpu
    def _lowercase ( self : List[str] , UpperCamelCase__ : int ) -> List[Any]:
        """Check that --log_level / --log_level_replica control how many times
        the "Running training" info line is emitted across processes."""
        __magic_name__ = {
            # test with the default log_level - should be info and thus log info once
            """base""": {"""extra_args_str""": """""", """n_matches""": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            """low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            """high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            """mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
        }
        __magic_name__ = experiments[experiment_id]
        __magic_name__ = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
        __magic_name__ = """Running training"""
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**A__ , extra_args_str=data["""extra_args_str"""] )
        # Count occurrences of the info line in captured stderr.
        __magic_name__ = len(re.findall(A__ , cl.err ) )
        self.assertEqual(A__ , data["""n_matches"""] )

    @slow
    def _lowercase ( self : Tuple ) -> Any:
        """Full (slow) training run: verify the model actually learns and that
        do_predict writes generations + metrics files."""
        __magic_name__ = self.run_trainer(
            eval_steps=2 , max_len=128 , model_name=A__ , learning_rate=3E-4 , num_train_epochs=10 , distributed=A__ , )
        # Check metrics
        __magic_name__ = TrainerState.load_from_json(os.path.join(A__ , """trainer_state.json""" ) ).log_history
        __magic_name__ = [log for log in logs if """eval_loss""" in log.keys()]
        __magic_name__ = eval_metrics[0]
        __magic_name__ = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["""eval_bleu"""] , A__ )
        # test if do_predict saves generations and metrics
        __magic_name__ = os.listdir(A__ )
        __magic_name__ = {os.path.basename(A__ ) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents

    @slow
    @require_bitsandbytes
    def _lowercase ( self : Tuple ) -> Dict:
        """Compare GPU memory usage of AdamW (torch) vs AdamW (bitsandbytes 8-bit)
        and check BNB saves at least ~120MB while loss stays identical."""
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(UpperCamelCase__ : str ) -> Tuple[int, float]:
            # One-epoch run with memory metrics enabled; returns
            # (gpu_peak_mem_mb, gpu_alloc_mem_mb, train_loss).
            __magic_name__ = """--skip_memory_metrics 0"""
            __magic_name__ = self.run_trainer(
                max_len=128 ,
                model_name=A__ ,
                learning_rate=3E-4 ,
                num_train_epochs=1 ,
                optim=A__ ,
                distributed=A__ ,
                extra_args_str=A__ ,
                do_eval=A__ ,
                do_predict=A__ ,
                n_gpus_to_use=1 ,
            )
            # Check metrics
            __magic_name__ = TrainerState.load_from_json(Path(A__ , """trainer_state.json""" ) ).log_history
            # Convert byte deltas to MB.
            __magic_name__ = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
            __magic_name__ = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
            __magic_name__ = logs[0]["""train_loss"""]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        __magic_name__ , __magic_name__ , __magic_name__ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        __magic_name__ , __magic_name__ , __magic_name__ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        __magic_name__ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        __magic_name__ = gpu_peak_mem_orig + gpu_alloc_mem_orig
        __magic_name__ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        __magic_name__ = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb 25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        __magic_name__ = 120
        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            A__ ,
            A__ ,
            """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
            F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
            F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' ,
        )
        self.assertGreater(
            A__ ,
            A__ ,
            """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
            F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
            F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' ,
        )
        self.assertEqual(
            A__ , A__ , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )

    def _lowercase ( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict = 3E-3 , UpperCamelCase__ : Optional[int] = "adafactor" , UpperCamelCase__ : str = False , UpperCamelCase__ : str = None , UpperCamelCase__ : List[Any] = 0 , UpperCamelCase__ : List[Any] = True , UpperCamelCase__ : Optional[Any] = True , UpperCamelCase__ : Dict = True , UpperCamelCase__ : Union[str, Any] = True , UpperCamelCase__ : Optional[Any] = None , ) -> Dict:
        """Shared driver: build the run_translation.py CLI and execute it, either
        in-process (patching sys.argv) or distributed via torch.distributed.run.

        Returns the output directory used by the run.
        NOTE(review): `--max_source_length 70,184` / `--max_target_length 70,184`
        below contain a comma -- likely a mangled numeric literal; confirm.
        """
        __magic_name__ = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
        __magic_name__ = self.get_auto_remove_tmp_dir()
        # Training arguments for the example script.
        __magic_name__ = F'''\n            --model_name_or_path {model_name}\n            --train_file {data_dir}/train.json\n            --validation_file {data_dir}/val.json\n            --test_file {data_dir}/test.json\n            --output_dir {output_dir}\n            --overwrite_output_dir\n            --max_train_samples 8\n            --max_source_length 70,184\n            --max_target_length 70,184\n            --do_train\n            --num_train_epochs {str(A__ )}\n            --per_device_train_batch_size 4\n            --learning_rate {learning_rate}\n            --warmup_steps 8\n            --logging_steps 0\n            --logging_strategy no\n            --save_steps {str(A__ )}\n            --group_by_length\n            --label_smoothing_factor 0.1\n            --target_lang ro_RO\n            --source_lang en_XX\n        '''.split()
        # Evaluation arguments.
        __magic_name__ = F'''\n            --do_eval\n            --per_device_eval_batch_size 4\n            --max_eval_samples 8\n            --val_max_target_length 70,184\n            --evaluation_strategy steps\n            --eval_steps {str(A__ )}\n        '''.split()
        __magic_name__ = """ --do_predict """.split()
        __magic_name__ = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += F'''--optim {optim}'''.split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                __magic_name__ = get_gpu_count()
            __magic_name__ = get_torch_dist_unique_port()
            # Launch via torch.distributed.run as a subprocess.
            __magic_name__ = F'''\n                -m torch.distributed.run\n                --nproc_per_node={n_gpus_to_use}\n                --master_port={master_port}\n                {self.examples_dir_str}/pytorch/translation/run_translation.py\n            '''.split()
            __magic_name__ = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(A__ , env=self.get_env() )
        else:
            # In-process run: patch sys.argv and call the script's main().
            __magic_name__ = ["""run_translation.py"""] + args
            with patch.object(A__ , """argv""" , A__ ):
                main()
        return output_dir
700
import unittest

from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class UpperCAmelCase_:
    """Model tester: builds tiny Nystromformer configs/inputs and checks output
    shapes for each task head.

    NOTE(review): names are obfuscated -- every local/attribute assignment is
    bound to `__magic_name__` while later code reads names like `self.parent`,
    `self.batch_size`, `config_and_inputs`; restore the original names.
    """

    def __init__ ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[int]=99 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : str=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : List[Any]=None , ) -> Union[str, Any]:
        """Store the (tiny) hyperparameters used to build configs and inputs."""
        __magic_name__ = parent
        __magic_name__ = batch_size
        __magic_name__ = seq_length
        __magic_name__ = is_training
        __magic_name__ = use_input_mask
        __magic_name__ = use_token_type_ids
        __magic_name__ = use_labels
        __magic_name__ = vocab_size
        __magic_name__ = hidden_size
        __magic_name__ = num_hidden_layers
        __magic_name__ = num_attention_heads
        __magic_name__ = intermediate_size
        __magic_name__ = hidden_act
        __magic_name__ = hidden_dropout_prob
        __magic_name__ = attention_probs_dropout_prob
        __magic_name__ = max_position_embeddings
        __magic_name__ = type_vocab_size
        __magic_name__ = type_sequence_label_size
        __magic_name__ = initializer_range
        __magic_name__ = num_labels
        __magic_name__ = num_choices
        __magic_name__ = scope

    def _lowercase ( self : Any ) -> Any:
        """Build random input ids, masks, token type ids and labels plus a config."""
        __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __magic_name__ = None
        if self.use_input_mask:
            __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
        __magic_name__ = None
        if self.use_token_type_ids:
            __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __magic_name__ = None
        __magic_name__ = None
        __magic_name__ = None
        if self.use_labels:
            __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
        __magic_name__ = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _lowercase ( self : Tuple ) -> Any:
        """Return a NystromformerConfig built from the tester's hyperparameters."""
        return NystromformerConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            is_decoder=UpperCamelCase__ ,
            initializer_range=self.initializer_range ,
        )

    def _lowercase ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> Tuple:
        """Check the base model's last_hidden_state shape under three input variants."""
        __magic_name__ = NystromformerModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
        __magic_name__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
        __magic_name__ = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ) -> str:
        """Check the masked-LM head's logits shape."""
        __magic_name__ = NystromformerForMaskedLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _lowercase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Any ) -> Optional[Any]:
        """Check start/end logits shapes of the question-answering head."""
        __magic_name__ = NystromformerForQuestionAnswering(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(
            UpperCamelCase__ ,
            attention_mask=UpperCamelCase__ ,
            token_type_ids=UpperCamelCase__ ,
            start_positions=UpperCamelCase__ ,
            end_positions=UpperCamelCase__ ,
        )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ) -> Optional[int]:
        """Check the sequence-classification head's logits shape."""
        __magic_name__ = self.num_labels
        __magic_name__ = NystromformerForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _lowercase ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Dict:
        """Check the token-classification head's logits shape."""
        __magic_name__ = self.num_labels
        __magic_name__ = NystromformerForTokenClassification(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Optional[Any]:
        """Check the multiple-choice head: inputs are tiled to num_choices."""
        __magic_name__ = self.num_choices
        __magic_name__ = NystromformerForMultipleChoice(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        # Expand (batch, seq) inputs to (batch, num_choices, seq).
        __magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = model(
            UpperCamelCase__ ,
            attention_mask=UpperCamelCase__ ,
            token_type_ids=UpperCamelCase__ ,
            labels=UpperCamelCase__ ,
        )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _lowercase ( self : int ) -> List[Any]:
        """Return (config, inputs_dict) for the common model tests."""
        __magic_name__ = self.prepare_config_and_inputs()
        (
            (
                __magic_name__
            ) ,
            (
                __magic_name__
            ) ,
            (
                __magic_name__
            ) ,
            (
                __magic_name__
            ) ,
            (
                __magic_name__
            ) ,
            (
                __magic_name__
            ) ,
            (
                __magic_name__
            ) ,
        ) = config_and_inputs
        __magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict


@require_torch
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
    """Common model/pipeline test suite for Nystromformer.

    NOTE(review): the base-class names `_A , _A` are mangled -- presumably
    ModelTesterMixin and PipelineTesterMixin; the repeated `a__` attributes
    are likewise mangled class attributes (all_model_classes,
    pipeline_model_mapping, etc.).
    """

    a__ = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    a__ = (
        {
            """feature-extraction""": NystromformerModel,
            """fill-mask""": NystromformerForMaskedLM,
            """question-answering""": NystromformerForQuestionAnswering,
            """text-classification""": NystromformerForSequenceClassification,
            """token-classification""": NystromformerForTokenClassification,
            """zero-shot""": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    a__ = False
    a__ = False

    def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
        """Set up the model tester and a ConfigTester for NystromformerConfig."""
        __magic_name__ = NystromformerModelTester(self )
        __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )

    def _lowercase ( self : Tuple ) -> Any:
        """Run the shared config tests."""
        self.config_tester.run_common_tests()

    def _lowercase ( self : Optional[Any] ) -> Any:
        """Base model forward pass."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _lowercase ( self : Optional[Any] ) -> int:
        """Base model with each position-embedding type."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __magic_name__ = type
            self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _lowercase ( self : List[Any] ) -> List[Any]:
        """Masked-LM head."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )

    def _lowercase ( self : Union[str, Any] ) -> str:
        """Multiple-choice head."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )

    def _lowercase ( self : Dict ) -> List[Any]:
        """Question-answering head."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )

    def _lowercase ( self : str ) -> int:
        """Sequence-classification head."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )

    def _lowercase ( self : List[Any] ) -> List[str]:
        """Token-classification head."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )

    @slow
    def _lowercase ( self : str ) -> Tuple:
        """Load the first pretrained checkpoint from the hub (slow)."""
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __magic_name__ = NystromformerModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )


@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow integration tests against the uw-madison/nystromformer-512 checkpoint."""

    @slow
    def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
        """No-head inference: check output shape and a 3x3 logits slice."""
        __magic_name__ = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" )
        __magic_name__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        with torch.no_grad():
            __magic_name__ = model(UpperCamelCase__ )[0]
        __magic_name__ = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , UpperCamelCase__ )
        # Reference values recorded from the original checkpoint.
        __magic_name__ = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )

    @slow
    def _lowercase ( self : int ) -> str:
        """Masked-LM inference: the [MASK] in the prompt should decode to "capital"."""
        __magic_name__ = """the [MASK] of Belgium is Brussels"""
        __magic_name__ = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" )
        __magic_name__ = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" )
        __magic_name__ = tokenizer(UpperCamelCase__ , return_tensors="""pt""" )
        with torch.no_grad():
            __magic_name__ = model(encoding.input_ids ).logits
        # Token position 2 is the [MASK]; take its argmax prediction.
        __magic_name__ = token_logits[:, 2, :].argmax(-1 )[0]
        self.assertEqual(tokenizer.decode(UpperCamelCase__ ) , """capital""" )
76
0
# Lazy-import module for GroupViT: configuration is always importable; the
# PyTorch and TensorFlow model classes are registered only when their
# framework is installed.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# NOTE(review): the dict below is assigned to a mangled name
# (`__lowerCAmelCase`) but is consumed as `_import_structure` at the bottom --
# presumably the original name; confirm.
__lowerCAmelCase : Union[str, Any] = {
    'configuration_groupvit': [
        'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'GroupViTConfig',
        'GroupViTOnnxConfig',
        'GroupViTTextConfig',
        'GroupViTVisionConfig',
    ],
}

# Register PyTorch models only if torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : str = [
        'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GroupViTModel',
        'GroupViTPreTrainedModel',
        'GroupViTTextModel',
        'GroupViTVisionModel',
    ]

# Register TensorFlow models only if TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Optional[Any] = [
        'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFGroupViTModel',
        'TFGroupViTPreTrainedModel',
        'TFGroupViTTextModel',
        'TFGroupViTVisionModel',
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy loader.
    __lowerCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
701
from ...configuration_utils import PretrainedConfig
from ...utils import logging

__lowerCAmelCase : Tuple = logging.get_logger(__name__)

# Mapping of known checkpoints to their config URLs.
__lowerCAmelCase : Union[str, Any] = {
    'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class UpperCAmelCase_ ( _A ):
    """Configuration class for the CvT (Convolutional vision Transformer) model.

    Each list-valued argument has one entry per stage (3 stages by default).
    NOTE(review): the base class `_A` and parameter names (`UpperCamelCase__`
    repeated) are mangled; the body reads un-mangled names such as
    `num_channels`, `patch_sizes`, ... -- presumably the original parameters.
    """

    a__ = """cvt"""  # model_type identifier

    def __init__ ( self : Dict , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : List[Any]=[7, 3, 3] , UpperCamelCase__ : Any=[4, 2, 2] , UpperCamelCase__ : Optional[Any]=[2, 1, 1] , UpperCamelCase__ : Union[str, Any]=[64, 192, 384] , UpperCamelCase__ : Dict=[1, 3, 6] , UpperCamelCase__ : Any=[1, 2, 10] , UpperCamelCase__ : List[str]=[4.0, 4.0, 4.0] , UpperCamelCase__ : Dict=[0.0, 0.0, 0.0] , UpperCamelCase__ : Tuple=[0.0, 0.0, 0.0] , UpperCamelCase__ : Optional[Any]=[0.0, 0.0, 0.1] , UpperCamelCase__ : str=[True, True, True] , UpperCamelCase__ : Optional[Any]=[False, False, True] , UpperCamelCase__ : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase__ : List[Any]=[3, 3, 3] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : Optional[int]=[2, 2, 2] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : List[str]=[1, 1, 1] , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=1E-12 , **UpperCamelCase__ : int , ) -> Dict:
        """Store per-stage patch/attention hyperparameters on the config."""
        super().__init__(**UpperCamelCase__ )
        __magic_name__ = num_channels
        __magic_name__ = patch_sizes
        __magic_name__ = patch_stride
        __magic_name__ = patch_padding
        __magic_name__ = embed_dim
        __magic_name__ = num_heads
        __magic_name__ = depth
        __magic_name__ = mlp_ratio
        __magic_name__ = attention_drop_rate
        __magic_name__ = drop_rate
        __magic_name__ = drop_path_rate
        __magic_name__ = qkv_bias
        __magic_name__ = cls_token
        __magic_name__ = qkv_projection_method
        __magic_name__ = kernel_qkv
        __magic_name__ = padding_kv
        __magic_name__ = stride_kv
        __magic_name__ = padding_q
        __magic_name__ = stride_q
        __magic_name__ = initializer_range
        __magic_name__ = layer_norm_eps
76
0
import unittest

import torch

from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin

enable_full_determinism()


class UpperCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
    """Unit tests for the VQModel (VQ-VAE) from diffusers.

    NOTE(review): names are obfuscated -- the duplicated `a__` attributes were
    presumably `model_class` and `main_input_name`, the duplicated base
    `snake_case__` the two tester mixins, and `UpperCAmelCase_` inside the
    bodies is used both as `torch_device` and `output_loading_info=True`;
    restore the original names before running.
    """

    a__ = VQModel
    a__ = """sample"""

    @property
    def _lowercase ( self : List[str] , UpperCamelCase__ : List[str]=(32, 32) ) -> Tuple:
        """Random 4x3xHxW input batch for the model."""
        __magic_name__ = 4
        __magic_name__ = 3
        __magic_name__ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
        return {"sample": image}

    @property
    def _lowercase ( self : Tuple ) -> str:
        """Expected input shape (C, H, W)."""
        return (3, 32, 32)

    @property
    def _lowercase ( self : List[Any] ) -> Any:
        """Expected output shape (C, H, W)."""
        return (3, 32, 32)

    def _lowercase ( self : Optional[int] ) -> Dict:
        """Tiny VQModel init kwargs plus a matching dummy input."""
        __magic_name__ = {
            """block_out_channels""": [32, 64],
            """in_channels""": 3,
            """out_channels""": 3,
            """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            """latent_channels""": 3,
        }
        __magic_name__ = self.dummy_input
        return init_dict, inputs_dict

    def _lowercase ( self : List[str] ) -> Dict:
        """Intentionally skipped common test (not applicable to VQModel)."""
        pass

    def _lowercase ( self : Optional[Any] ) -> List[str]:
        """Intentionally skipped common test (not applicable to VQModel)."""
        pass

    def _lowercase ( self : Any ) -> Union[str, Any]:
        """from_pretrained with output_loading_info: no missing keys, forward runs."""
        __magic_name__ , __magic_name__ = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=UpperCAmelCase_ )
        self.assertIsNotNone(UpperCAmelCase_ )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(UpperCAmelCase_ )
        __magic_name__ = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"

    def _lowercase ( self : Tuple ) -> Optional[Any]:
        """Seeded forward pass against recorded reference output values."""
        __magic_name__ = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
        model.to(UpperCAmelCase_ ).eval()
        # Seed all RNGs so the random input is reproducible.
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        __magic_name__ = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        __magic_name__ = image.to(UpperCAmelCase_ )
        with torch.no_grad():
            __magic_name__ = model(UpperCAmelCase_ ).sample
        __magic_name__ = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        # Reference values recorded from the original checkpoint.
        __magic_name__ = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
        # fmt: on
        self.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
702
# Lazy-import module for CANINE: configuration and tokenizer are always
# importable; PyTorch model classes are registered only when torch is
# installed.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# NOTE(review): the dict is assigned to a mangled name (`__lowerCAmelCase`)
# but consumed as `_import_structure` at the bottom -- presumably the
# original name; confirm.
__lowerCAmelCase : List[str] = {
    'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
    'tokenization_canine': ['CanineTokenizer'],
}

# Register PyTorch models only if torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Optional[Any] = [
        'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CanineForMultipleChoice',
        'CanineForQuestionAnswering',
        'CanineForSequenceClassification',
        'CanineForTokenClassification',
        'CanineLayer',
        'CanineModel',
        'CaninePreTrainedModel',
        'load_tf_weights_in_canine',
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    # At runtime, replace this module with a lazy loader.
    __lowerCAmelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
0
import inspect
import unittest

from datasets import load_dataset
from packaging import version

from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_MAPPING,
        BeitForImageClassification,
        BeitForMaskedImageModeling,
        BeitForSemanticSegmentation,
        BeitModel,
    )
    from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    import PIL
    from PIL import Image

    from transformers import BeitImageProcessor


class BeitModelTester:
    """Builds tiny BeiT configs/inputs and shape-checks each model head.

    Named BeitModelTester because BeitModelTest.setUp instantiates it by
    that name.
    """

    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        # vocab_size is intentionally fixed at 100 regardless of the argument.
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels, pixel_labels) for the tiny model."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # logits exclude the [CLS] position, hence seq_length - 1
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite hookup for the BeiT model family."""

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against released BeiT checkpoints."""

    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        # reference values differ slightly by Pillow version due to resize behavior
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
703
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    """Load the S3PRL sequence-classification head weights into a HF model.

    The downstream weights must be written into the model's parameters
    (.data assignment) — binding them to a throwaway local would silently
    discard them.
    """
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load the S3PRL diarization (audio-frame classification) head weights."""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load the S3PRL x-vector (speaker verification) head weights."""
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an S3PRL checkpoint into a Hugging Face model + feature extractor.

    Dispatches on the architecture declared in the config; raises
    NotImplementedError for unsupported heads.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
76
0
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    """Copy the selected teacher layers (by index) into the student's layer list."""
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps  num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student, n_teacher):
    """Return the teacher layer indices to copy for the given student depth.

    Falls back to the first n_student layers (with a warning) when no
    hardcoded mapping exists.
    """
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    """Used or the --supervise_forward kwarg: which teacher layers align with each student layer."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Make a student model by copying alternating layers of the teacher.

    Returns (student, e_layers_to_copy, d_layers_to_copy) and saves the
    student (plus tokenizer, when teacher is a name) to save_path.
    """
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
704
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    """Shared assertions for a single-split text dataset (4 rows, one 'text' column)."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# NOTE: parameter names matter — `text_path` and `tmp_path` are pytest fixtures.
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict of text datasets."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_textdatasetdict_reader_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_textdatasetdict_reader_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_textdatasetdict_reader_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
76
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: maps each submodule to the public names it exports.
# Must be named `_import_structure` — it is passed to _LazyModule below.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

# The modeling submodule is only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only; never executed at runtime.
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy so submodules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
705
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


# NOTE(review): the class name was mangled upstream; the defaults
# (shortest_edge=256, crop 224x224, BILINEAR, ImageNet-standard mean/std)
# match the stock ImageNet-style processors — confirm the intended public name.
class UpperCAmelCase_(BaseImageProcessor):
    """Image processor: resize -> center-crop -> rescale -> normalize.

    Fixes applied to the mangled original: the base class was the undefined
    `_A` (now `BaseImageProcessor`, the imported base this contract requires);
    all five methods collided on the name `_lowercase` while the code called
    `self.resize` / `self.center_crop` / `self.rescale` / `self.normalize`
    (restored); and every `self.attr = ...` assignment had been destroyed,
    so `__init__` configured nothing (restored).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Default: resize so the shorter side is 256, keeping aspect ratio.
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` so its shortest edge is `size["shortest_edge"]`, preserving aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` channel-wise with `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured transform pipeline on one image or a batch.

        Each argument defaults to the value set in `__init__` when `None`.
        Returns a `BatchFeature` with key `"pixel_values"`.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
76
0
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Build (timm_key, hf_key) pairs mapping timm DeiT weights to HF names."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate HF query/key/value tensors (in place)."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]` in place."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats test image used for output verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Convert a timm DeiT checkpoint named `deit_name` to HF format and save it.

    Verifies the converted model's logits against the original timm model on a
    sample image before saving (asserts on mismatch).
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False

    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # e.g. "vit_deit_base_distilled_patch16_224" -> patch 16, image size 224
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])

    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass  # DeiTConfig defaults already match the base architecture
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
706
import math


def insertion_sort(array, start=0, end=0):
    """Sort `array[start:end]` in place with insertion sort; returns `array`.

    `end == 0` means "to the end of the array" (the original's `end or len`
    convention is preserved).
    """
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements right until the insertion point is found.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):
    """Sift `array[index]` down to restore the max-heap property (in place)."""
    largest = index
    left_index = 2 * index + 1  # Left child
    right_index = 2 * index + 2  # Right child

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        # Swap and continue sifting down; the mangled original lost both
        # targets of this tuple swap.
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    """Sort `array` in place via heapsort; returns `array`."""
    n = len(array)
    # Build a max-heap bottom-up.
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    # Repeatedly move the max to the end and re-heapify the prefix.
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_3(array, first_index, middle_index, last_index):
    """Return the median of the three sampled elements (pivot selection)."""
    if (array[first_index] > array[middle_index]) != (array[first_index] > array[last_index]):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (array[middle_index] > array[last_index]):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    """Hoare-style partition of `array[low:high]` around `pivot`; returns the split index."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    """Sort `array` in place with introsort and return it.

    >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
    [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
    >>> sort([])
    []
    >>> sort([5])
    [5]
    """
    if len(array) == 0:
        return array
    # Depth bound 2*ceil(log2(n)): beyond this, fall back to heapsort.
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16  # below this, insertion sort is fastest
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    """Introspective sort of `array[start:end]`: quicksort that degrades to
    heapsort past `max_depth` and to insertion sort below `size_threshold`."""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
76
0
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on


# NOTE(review): the public class name was mangled upstream; the vocab/language
# constants identify this as the NLLB sentencepiece tokenizer — confirm the
# intended name. Fixes applied: the class inherited from itself (now
# `PreTrainedTokenizer`, the imported base whose hooks these methods implement);
# every method was named `_lowercase` (restored to the PreTrainedTokenizer
# contract names the code itself calls); `@src_lang.setter` referenced a
# property that did not exist; `super().prepare_seqaseq_batch` was a mangled
# spelling; and all `self.` state assignments had been destroyed.
class UpperCAmelCase_(PreTrainedTokenizer):
    """Sentencepiece-based NLLB tokenizer with fairseq-aligned special tokens."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word: include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        # The C++ sp_model object cannot be pickled; serialize its proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a 0/1 mask marking special tokens (1) vs sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Wrap the sequence(s) with the current language prefix/suffix tokens."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """NLLB does not use token types: return an all-zero list of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by generation utilities to prepare translation inputs."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset prefix/suffix to the source-language convention.

        Legacy: no prefix, suffix = [eos, src_lang_code]; current: prefix =
        [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset prefix/suffix to the target-language convention (mirrors src)."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
707
# Conversion script: TensorFlow MobileNetV1 checkpoint -> Hugging Face format.
#
# Fixes applied: every function had been renamed to `a__` (while the call
# sites referenced the descriptive names, producing NameErrors), one def had
# duplicate `A_` parameters (a SyntaxError), and all locals had been collapsed
# into a single `__magic_name__` name.  Names are restored to match the call
# sites visible in this file; behavior is otherwise unchanged.
import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetVaConfig,
    MobileNetVaForImageClassification,
    MobileNetVaImageProcessor,
    load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_va_config(model_name):
    """Build a MobileNetV1 config from a checkpoint name such as 'mobilenet_v1_1.0_224'.

    The depth multiplier and image size are parsed out of the name; the label
    mapping is the 1001-class ImageNet set used by the TF checkpoints.
    """
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # Shift every ImageNet id up by one to make room for the background class.
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    """Fetch the standard COCO test image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a TF MobileNetV1 checkpoint to HF format, verify logits, and save.

    Args:
        model_name: checkpoint name, e.g. 'mobilenet_v1_1.0_224'.
        checkpoint_path: path to the original TensorFlow .ckpt file.
        pytorch_dump_folder_path: output directory for model + image processor.
        push_to_hub: also push both artifacts under the 'google/' namespace.
    """
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    # Spot-check the first three logits against values recorded from the TF model.
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
76
0
# Lazy-import __init__ for the MBart model family: the `_import_structure`
# mapping mirrors the TYPE_CHECKING imports below, with each optional backend
# (sentencepiece / tokenizers / torch / tf / flax) guarded by a try/except on
# its availability check.
#
# NOTE(review): identifiers appear mechanically renamed — every assignment
# target is `__lowerCAmelCase`, yet the final _LazyModule call reads
# `_import_structure`.  Presumably each assignment originally extended the
# `_import_structure` dict; confirm against the upstream file.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Base (always-available) objects.
__lowerCAmelCase : Tuple = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}

# Slow tokenizer: requires sentencepiece.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Optional[Any] = ['MBartTokenizer']

# Fast tokenizer: requires tokenizers.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Tuple = ['MBartTokenizerFast']

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Tuple = [
        'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MBartForCausalLM',
        'MBartForConditionalGeneration',
        'MBartForQuestionAnswering',
        'MBartForSequenceClassification',
        'MBartModel',
        'MBartPreTrainedModel',
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Dict = [
        'TFMBartForConditionalGeneration',
        'TFMBartModel',
        'TFMBartPreTrainedModel',
    ]

# Flax models.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Optional[int] = [
        'FlaxMBartForConditionalGeneration',
        'FlaxMBartForQuestionAnswering',
        'FlaxMBartForSequenceClassification',
        'FlaxMBartModel',
        'FlaxMBartPreTrainedModel',
    ]

# Static-analysis mirror: real imports for type checkers only.
if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

# At runtime, replace this module with a lazy loader.
else:
    import sys

    __lowerCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
708
# Repo-quality script: verifies that every `__init__.py` under src/transformers
# defines the same objects in its `_import_structure` dict and its
# TYPE_CHECKING block, and that all submodules are registered in the main init.
#
# NOTE(review): identifiers are mechanically corrupted throughout — all
# functions are named `a__` (shadowing each other; call sites use the original
# names find_backend / parse_init / analyze_results / check_all_inits /
# get_transformers_submodules / check_submodules), one def has duplicate `A_`
# parameters (a SyntaxError), all module constants are `__lowerCAmelCase`, and
# locals are collapsed to `__magic_name__`.  Documented as-is; do not run
# without restoring the original names.
import collections
import importlib.util
import os
import re
from pathlib import Path


# Root of the package being checked (originally PATH_TO_TRANSFORMERS).
__lowerCAmelCase : int = 'src/transformers'

# Matches is_xxx_available()
__lowerCAmelCase : Optional[int] = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
__lowerCAmelCase : Dict = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__lowerCAmelCase : int = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
__lowerCAmelCase : Optional[Any] = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
__lowerCAmelCase : Optional[Any] = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__lowerCAmelCase : Dict = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
__lowerCAmelCase : List[str] = re.compile('^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
__lowerCAmelCase : Optional[int] = re.compile('^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
__lowerCAmelCase : List[str] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
__lowerCAmelCase : int = re.compile(R'^\s*try:')
# Catches a line with else:
__lowerCAmelCase : Tuple = re.compile(R'^\s*else:')


def a__ ( A_ ):
    """Return the normalized backend name ("x_and_y") for a guard line, or None.

    NOTE(review): sorts `backends` then joins the *line* argument — renaming
    artifact; the original sorted and joined the extracted backend list.
    """
    if _re_test_backend.search(A_ ) is None:
        return None
    __magic_name__ = [b[0] for b in _re_backend.findall(A_ )]
    backends.sort()
    return "_and_".join(A_ )


def a__ ( A_ ):
    """Parse one __init__.py into (import_dict_objects, type_hint_objects).

    Both are dicts keyed by backend name ("none" for unguarded objects) whose
    values are the object-name lists declared on each side.
    """
    with open(A_, """r""", encoding="""utf-8""", newline="""\n""" ) as f:
        __magic_name__ = f.readlines()

    __magic_name__ = 0
    while line_index < len(A_ ) and not lines[line_index].startswith("""_import_structure = {""" ):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(A_ ):
        return None

    # First grab the objects without a specific backend in _import_structure
    __magic_name__ = []
    while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
        __magic_name__ = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(A_ ):
            __magic_name__ = _re_one_line_import_struct.search(A_ ).groups()[0]
            __magic_name__ = re.findall("""\[([^\]]+)\]""", A_ )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
            line_index += 1
            continue
        __magic_name__ = _re_import_struct_key_value.search(A_ )
        if single_line_import_search is not None:
            __magic_name__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(A_ ) > 0]
            objects.extend(A_ )
        elif line.startswith(""" """ * 8 + """\"""" ):
            objects.append(line[9:-3] )
        line_index += 1

    __magic_name__ = {"""none""": objects}

    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        __magic_name__ = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            __magic_name__ = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1

            line_index += 1

            __magic_name__ = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
                __magic_name__ = lines[line_index]
                if _re_import_struct_add_one.search(A_ ) is not None:
                    objects.append(_re_import_struct_add_one.search(A_ ).groups()[0] )
                elif _re_import_struct_add_many.search(A_ ) is not None:
                    __magic_name__ = _re_import_struct_add_many.search(A_ ).groups()[0].split(""", """ )
                    __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0]
                    objects.extend(A_ )
                elif _re_between_brackets.search(A_ ) is not None:
                    __magic_name__ = _re_between_brackets.search(A_ ).groups()[0].split(""", """ )
                    __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0]
                    objects.extend(A_ )
                elif _re_quote_object.search(A_ ) is not None:
                    objects.append(_re_quote_object.search(A_ ).groups()[0] )
                elif line.startswith(""" """ * 8 + """\"""" ):
                    objects.append(line[9:-3] )
                elif line.startswith(""" """ * 12 + """\"""" ):
                    objects.append(line[13:-3] )
                line_index += 1

            __magic_name__ = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    __magic_name__ = []
    while (
        line_index < len(A_ )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("""else""" )
    ):
        __magic_name__ = lines[line_index]
        __magic_name__ = _re_import.search(A_ )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
        elif line.startswith(""" """ * 8 ):
            objects.append(line[8:-2] )
        line_index += 1

    __magic_name__ = {"""none""": objects}

    # Let's continue with backend-specific objects
    while line_index < len(A_ ):
        # If the line is an if is_backend_available, we grab all objects associated.
        __magic_name__ = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            __magic_name__ = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1

            line_index += 1

            __magic_name__ = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
                __magic_name__ = lines[line_index]
                __magic_name__ = _re_import.search(A_ )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
                elif line.startswith(""" """ * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1

            __magic_name__ = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def a__ ( A_, A_ ):
    """Compare the two halves of an init; return a list of error strings.

    NOTE(review): duplicate `A_` parameters are a SyntaxError — originally
    (import_dict_objects, type_hint_objects).
    """

    def find_duplicates(A_ ):
        # Object names that appear more than once on one side.
        return [k for k, v in collections.Counter(A_ ).items() if v > 1]

    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]

    __magic_name__ = []
    for key in import_dict_objects.keys():
        __magic_name__ = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
        __magic_name__ = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )

        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            __magic_name__ = """base imports""" if key == """none""" else f'''{key} backend'''
            errors.append(f'''Differences for {name}:''' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
    return errors


def a__ ( ):
    """Walk the package tree and raise if any init's two halves disagree."""
    __magic_name__ = []
    for root, _, files in os.walk(A_ ):
        if "__init__.py" in files:
            __magic_name__ = os.path.join(A_, """__init__.py""" )
            __magic_name__ = parse_init(A_ )
            if objects is not None:
                __magic_name__ = analyze_results(*A_ )
                if len(A_ ) > 0:
                    __magic_name__ = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append("""\n""".join(A_ ) )
    if len(A_ ) > 0:
        raise ValueError("""\n\n""".join(A_ ) )


def a__ ( ):
    """Collect the dotted names of all transformers submodules on disk."""
    __magic_name__ = []
    for path, directories, files in os.walk(A_ ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("""_""" ):
                directories.remove(A_ )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(A_ ) / folder).glob("""*.py""" ) ) ) == 0:
                continue
            __magic_name__ = str((Path(A_ ) / folder).relative_to(A_ ) )
            __magic_name__ = short_path.replace(os.path.sep, """.""" )
            submodules.append(A_ )
        for fname in files:
            if fname == "__init__.py":
                continue
            __magic_name__ = str((Path(A_ ) / fname).relative_to(A_ ) )
            __magic_name__ = short_path.replace(""".py""", """""" ).replace(os.path.sep, """.""" )
            if len(submodule.split(""".""" ) ) == 1:
                submodules.append(A_ )
    return submodules


# Submodules deliberately absent from the main init (originally IGNORE_SUBMODULES).
__lowerCAmelCase : Dict = [
    'convert_pytorch_checkpoint_to_tf2',
    'modeling_flax_pytorch_utils',
]


def a__ ( ):
    """Raise if any on-disk submodule is missing from the main _import_structure."""
    # This is to make sure the transformers module imported is the one in the repo.
    __magic_name__ = importlib.util.spec_from_file_location(
        """transformers""", os.path.join(A_, """__init__.py""" ), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
    __magic_name__ = spec.loader.load_module()
    __magic_name__ = [
        module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(A_ ) > 0:
        __magic_name__ = """\n""".join(f'''- {module}''' for module in module_not_registered )
        raise ValueError(
            """The following submodules are not properly registered in the main init of Transformers:\n"""
            f'''{list_of_modules}\n'''
            """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
76
0
# Conversion script: original DPT (Intel-ISL) checkpoints -> Hugging Face
# DPTForDepthEstimation / DPTForSemanticSegmentation.
#
# NOTE(review): identifiers are mechanically corrupted — every function is
# named `a__` (call sites use get_dpt_config / remove_ignore_keys_ /
# rename_key(?) / read_in_q_k_v / prepare_img / convert_dpt_checkpoint), the
# 4-arg def has duplicate `A_` params (SyntaxError), and locals/arguments are
# collapsed to `__magic_name__` / `__lowerCAmelCase`.  Documented as-is; do
# not run without restoring the original names.
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)


def a__ ( A_ ):
    """Build (config, expected_output_shape) for a DPT checkpoint URL.

    '"large"' in the URL selects the ViT-L backbone sizes; '"ade"' switches
    to the 150-class ADE20k semantic-segmentation head.
    """
    __magic_name__ = DPTConfig()

    if "large" in checkpoint_url:
        # ViT-Large backbone hyper-parameters.
        __magic_name__ = 1024
        __magic_name__ = 4096
        __magic_name__ = 24
        __magic_name__ = 16
        __magic_name__ = [5, 11, 17, 23]
        __magic_name__ = [256, 512, 1024, 1024]
        __magic_name__ = (1, 384, 384)

    if "ade" in checkpoint_url:
        __magic_name__ = True
        __magic_name__ = 150
        __magic_name__ = """huggingface/label-files"""
        __magic_name__ = """ade20k-id2label.json"""
        __magic_name__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase, __lowerCAmelCase, repo_type="""dataset""" ) ), """r""" ) )
        __magic_name__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
        __magic_name__ = idalabel
        __magic_name__ = {v: k for k, v in idalabel.items()}
        __magic_name__ = [1, 150, 480, 480]

    return config, expected_shape


def a__ ( A_ ):
    """Drop the classification-head keys that the HF model does not use."""
    __magic_name__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(__lowerCAmelCase, __lowerCAmelCase )


def a__ ( A_ ):
    """Map one original DPT state-dict key onto the HF naming scheme."""
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        __magic_name__ = name.replace("""pretrained.model""", """dpt.encoder""" )
    if "pretrained.model" in name:
        __magic_name__ = name.replace("""pretrained.model""", """dpt.embeddings""" )
    if "patch_embed" in name:
        __magic_name__ = name.replace("""patch_embed""", """patch_embeddings""" )
    if "pos_embed" in name:
        __magic_name__ = name.replace("""pos_embed""", """position_embeddings""" )
    if "attn.proj" in name:
        __magic_name__ = name.replace("""attn.proj""", """attention.output.dense""" )
    if "proj" in name and "project" not in name:
        __magic_name__ = name.replace("""proj""", """projection""" )
    if "blocks" in name:
        __magic_name__ = name.replace("""blocks""", """layer""" )
    if "mlp.fc1" in name:
        __magic_name__ = name.replace("""mlp.fc1""", """intermediate.dense""" )
    if "mlp.fc2" in name:
        __magic_name__ = name.replace("""mlp.fc2""", """output.dense""" )
    if "norm1" in name:
        __magic_name__ = name.replace("""norm1""", """layernorm_before""" )
    if "norm2" in name:
        __magic_name__ = name.replace("""norm2""", """layernorm_after""" )
    if "scratch.output_conv" in name:
        __magic_name__ = name.replace("""scratch.output_conv""", """head""" )
    if "scratch" in name:
        __magic_name__ = name.replace("""scratch""", """neck""" )
    if "layer1_rn" in name:
        __magic_name__ = name.replace("""layer1_rn""", """convs.0""" )
    if "layer2_rn" in name:
        __magic_name__ = name.replace("""layer2_rn""", """convs.1""" )
    if "layer3_rn" in name:
        __magic_name__ = name.replace("""layer3_rn""", """convs.2""" )
    if "layer4_rn" in name:
        __magic_name__ = name.replace("""layer4_rn""", """convs.3""" )
    if "refinenet" in name:
        __magic_name__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        __magic_name__ = name.replace(f'''refinenet{layer_idx}''', f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
    if "out_conv" in name:
        __magic_name__ = name.replace("""out_conv""", """projection""" )
    if "resConfUnit1" in name:
        __magic_name__ = name.replace("""resConfUnit1""", """residual_layer1""" )
    if "resConfUnit2" in name:
        __magic_name__ = name.replace("""resConfUnit2""", """residual_layer2""" )
    if "conv1" in name:
        __magic_name__ = name.replace("""conv1""", """convolution1""" )
    if "conv2" in name:
        __magic_name__ = name.replace("""conv2""", """convolution2""" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        __magic_name__ = name.replace("""pretrained.act_postprocess1.0.project.0""", """neck.reassemble_stage.readout_projects.0.0""" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        __magic_name__ = name.replace("""pretrained.act_postprocess2.0.project.0""", """neck.reassemble_stage.readout_projects.1.0""" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        __magic_name__ = name.replace("""pretrained.act_postprocess3.0.project.0""", """neck.reassemble_stage.readout_projects.2.0""" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        __magic_name__ = name.replace("""pretrained.act_postprocess4.0.project.0""", """neck.reassemble_stage.readout_projects.3.0""" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        __magic_name__ = name.replace("""pretrained.act_postprocess1.3""", """neck.reassemble_stage.layers.0.projection""" )
    if "pretrained.act_postprocess1.4" in name:
        __magic_name__ = name.replace("""pretrained.act_postprocess1.4""", """neck.reassemble_stage.layers.0.resize""" )
    if "pretrained.act_postprocess2.3" in name:
        __magic_name__ = name.replace("""pretrained.act_postprocess2.3""", """neck.reassemble_stage.layers.1.projection""" )
    if "pretrained.act_postprocess2.4" in name:
        __magic_name__ = name.replace("""pretrained.act_postprocess2.4""", """neck.reassemble_stage.layers.1.resize""" )
    if "pretrained.act_postprocess3.3" in name:
        __magic_name__ = name.replace("""pretrained.act_postprocess3.3""", """neck.reassemble_stage.layers.2.projection""" )
    if "pretrained.act_postprocess4.3" in name:
        __magic_name__ = name.replace("""pretrained.act_postprocess4.3""", """neck.reassemble_stage.layers.3.projection""" )
    if "pretrained.act_postprocess4.4" in name:
        __magic_name__ = name.replace("""pretrained.act_postprocess4.4""", """neck.reassemble_stage.layers.3.resize""" )
    if "pretrained" in name:
        __magic_name__ = name.replace("""pretrained""", """dpt""" )
    if "bn" in name:
        __magic_name__ = name.replace("""bn""", """batch_norm""" )
    if "head" in name:
        __magic_name__ = name.replace("""head""", """head.head""" )
    if "encoder.norm" in name:
        __magic_name__ = name.replace("""encoder.norm""", """layernorm""" )
    if "auxlayer" in name:
        __magic_name__ = name.replace("""auxlayer""", """auxiliary_head.head""" )

    return name


def a__ ( A_, A_ ):
    """Split each fused timm qkv projection into separate q/k/v entries.

    NOTE(review): duplicate `A_` parameters — originally (state_dict, config).
    """
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        __magic_name__ = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
        __magic_name__ = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        __magic_name__ = in_proj_weight[: config.hidden_size, :]
        __magic_name__ = in_proj_bias[: config.hidden_size]
        __magic_name__ = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        __magic_name__ = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        __magic_name__ = in_proj_weight[
            -config.hidden_size :, :
        ]
        __magic_name__ = in_proj_bias[-config.hidden_size :]


def a__ ( ):
    """Fetch the standard COCO test image used to sanity-check conversions."""
    __magic_name__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    __magic_name__ = Image.open(requests.get(__lowerCAmelCase, stream=__lowerCAmelCase ).raw )
    return im


@torch.no_grad()
def a__ ( A_, A_, A_, A_ ):
    """Download, rename, verify and save one DPT checkpoint.

    NOTE(review): duplicate `A_` parameters — originally
    (checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name).
    """
    __magic_name__ , __magic_name__ = get_dpt_config(__lowerCAmelCase )
    # load original state_dict from URL
    __magic_name__ = torch.hub.load_state_dict_from_url(__lowerCAmelCase, map_location="""cpu""" )
    # remove certain keys
    remove_ignore_keys_(__lowerCAmelCase )
    # rename keys
    for key in state_dict.copy().keys():
        __magic_name__ = state_dict.pop(__lowerCAmelCase )
        __magic_name__ = val
    # read in qkv matrices
    read_in_q_k_v(__lowerCAmelCase, __lowerCAmelCase )

    # load HuggingFace model
    __magic_name__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase )
    model.load_state_dict(__lowerCAmelCase )
    model.eval()

    # Check outputs on an image
    __magic_name__ = 480 if """ade""" in checkpoint_url else 384
    __magic_name__ = DPTImageProcessor(size=__lowerCAmelCase )

    __magic_name__ = prepare_img()
    __magic_name__ = image_processor(__lowerCAmelCase, return_tensors="""pt""" )

    # forward pass
    __magic_name__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth

    # Assert logits
    __magic_name__ = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
    if "ade" in checkpoint_url:
        __magic_name__ = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
    assert outputs.shape == torch.Size(__lowerCAmelCase )
    assert (
        torch.allclose(outputs[0, 0, :3, :3], __lowerCAmelCase, atol=1e-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], __lowerCAmelCase )
    )

    Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(__lowerCAmelCase )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(__lowerCAmelCase )

    if push_to_hub:
        print("""Pushing model to hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(__lowerCAmelCase, __lowerCAmelCase ),
            organization="""nielsr""",
            commit_message="""Add model""",
            use_temp_dir=__lowerCAmelCase,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(__lowerCAmelCase, __lowerCAmelCase ),
            organization="""nielsr""",
            commit_message="""Add image processor""",
            use_temp_dir=__lowerCAmelCase,
        )


if __name__ == "__main__":
    __lowerCAmelCase : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
        type=str,
        help='URL of the original DPT checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
    )
    parser.add_argument(
        '--model_name',
        default='dpt-large',
        type=str,
        help='Name of the model, in case you\'re pushing to the hub.',
    )

    __lowerCAmelCase : List[str] = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
709
# Configuration class for SEW-D (squeezed/efficient wav2vec with disentangled
# attention), model_type "sew-d".
#
# NOTE(review): identifiers are mechanically corrupted — the class name, the
# `_A` base (presumably PretrainedConfig), and every __init__ parameter
# (all `UpperCamelCase__`, a duplicate-name SyntaxError).  The assignment
# right-hand sides preserve the original parameter names (hidden_size,
# feat_extract_norm, ...), so the intended signature can be recovered from
# them; confirm against the upstream configuration_sew_d.py.
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)

__lowerCAmelCase : List[Any] = {
    'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class UpperCAmelCase_ ( _A ):
    """Stores the hyper-parameters of a SEW-D speech encoder: transformer
    sizes, disentangled-attention settings, the convolutional feature
    extractor layout, SpecAugment masking, and task heads (CTC loss,
    sequence classification)."""

    # model_type identifier used by AutoConfig.
    a__ = """sew-d"""

    def __init__( self : List[str] , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Optional[int]=768 , UpperCamelCase__ : Tuple=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : int=3072 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : Any=256 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : str=("p2c", "c2p") , UpperCamelCase__ : List[Any]="layer_norm" , UpperCamelCase__ : int="gelu_python" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : int=0.0 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[int]=1E-7 , UpperCamelCase__ : List[Any]=1E-5 , UpperCamelCase__ : List[str]="group" , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : Tuple=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCamelCase__ : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[int]=128 , UpperCamelCase__ : Tuple=16 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=0.05 , UpperCamelCase__ : str=10 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=10 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : List[Any]="mean" , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Optional[int]=256 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=2 , **UpperCamelCase__ : str , ) -> Dict:
        """Initialize the configuration; unknown kwargs flow to the base class.

        NOTE(review): all parameters share one name here (renaming artifact);
        the right-hand sides below record the intended parameter names.
        """
        super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
        # Transformer / feature-extractor hyper-parameters.
        __magic_name__ = hidden_size
        __magic_name__ = feat_extract_norm
        __magic_name__ = feat_extract_activation
        __magic_name__ = list(UpperCamelCase__ )
        __magic_name__ = list(UpperCamelCase__ )
        __magic_name__ = list(UpperCamelCase__ )
        __magic_name__ = conv_bias
        __magic_name__ = num_conv_pos_embeddings
        __magic_name__ = num_conv_pos_embedding_groups
        __magic_name__ = len(self.conv_dim )
        __magic_name__ = num_hidden_layers
        __magic_name__ = intermediate_size
        __magic_name__ = squeeze_factor
        # Disentangled-attention (DeBERTa-style) settings.
        __magic_name__ = max_position_embeddings
        __magic_name__ = position_buckets
        __magic_name__ = share_att_key
        __magic_name__ = relative_attention
        __magic_name__ = norm_rel_ebd
        __magic_name__ = list(UpperCamelCase__ )
        __magic_name__ = hidden_act
        __magic_name__ = num_attention_heads
        __magic_name__ = hidden_dropout
        __magic_name__ = attention_dropout
        __magic_name__ = activation_dropout
        __magic_name__ = feat_proj_dropout
        __magic_name__ = final_dropout
        __magic_name__ = layer_norm_eps
        __magic_name__ = feature_layer_norm_eps
        __magic_name__ = initializer_range
        __magic_name__ = vocab_size

        # The three conv layouts must describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect."""
                """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
                F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
                F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        __magic_name__ = apply_spec_augment
        __magic_name__ = mask_time_prob
        __magic_name__ = mask_time_length
        __magic_name__ = mask_time_min_masks
        __magic_name__ = mask_feature_prob
        __magic_name__ = mask_feature_length
        __magic_name__ = mask_feature_min_masks

        # ctc loss
        __magic_name__ = ctc_loss_reduction
        __magic_name__ = ctc_zero_infinity

        # sequence classification
        __magic_name__ = use_weighted_layer_sum
        __magic_name__ = classifier_proj_size

    @property
    def _lowercase ( self : Union[str, Any] ) -> str:
        """Total downsampling factor of the conv feature extractor (product of strides)."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
76
0
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any]=13 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : str=99 , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : int=5 , UpperCamelCase__ : Any=4 , UpperCamelCase__ : Optional[Any]=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : int=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Optional[int]=4 , ) -> int: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_attention_mask __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range 
__magic_name__ = num_choices def _lowercase ( self : str ) -> Optional[Any]: """simple docstring""" __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = None if self.use_attention_mask: __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowercase ( self : Dict ) -> Dict: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() __magic_name__ = config_and_inputs __magic_name__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def _lowercase ( self : Dict ) -> int: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() __magic_name__ = config_and_inputs __magic_name__ = True __magic_name__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class UpperCAmelCase_ ( _A , unittest.TestCase ): '''simple docstring''' a__ = True a__ = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, 
FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def _lowercase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = FlaxRobertaModelTester(self ) @slow def _lowercase ( self : List[str] ) -> Tuple: """simple docstring""" for model_class_name in self.all_model_classes: __magic_name__ = model_class_name.from_pretrained("""roberta-base""" , from_pt=UpperCamelCase_ ) __magic_name__ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCamelCase_ )
710
import math import random def a__ ( A_, A_ = False ): '''simple docstring''' if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value __lowerCAmelCase : Union[str, Any] = 0.02 def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = float(2 * (random.randint(1, 100 )) - 1 ) for _ in range(A_ ): # Forward propagation __magic_name__ = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? __magic_name__ = (expected / 100) - layer_a # Error delta __magic_name__ = layer_1_error * sigmoid_function(A_, A_ ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 100 if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : List[Any] = int(input('Expected value: ')) __lowerCAmelCase : Tuple = int(input('Number of propagations: ')) print(forward_propagation(expected, number_propagations))
76
0
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.17.0.dev0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt') __lowerCAmelCase : Tuple = logging.getLogger(__name__) @dataclass class UpperCAmelCase_ : '''simple docstring''' a__ = field( default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) a__ = field( default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , ) a__ = field( default=10_24 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ = field( default=_A , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) a__ = field( default=_A , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. 
""" """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) a__ = field( default=_A , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) a__ = field( default=_A , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) a__ = field( default=_A , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of prediction examples to this """ """value if set.""" ) } , ) a__ = field( default=_A , metadata={"""help""": """A csv or a json file containing the training data."""} ) a__ = field( default=_A , metadata={"""help""": """A csv or a json file containing the validation data."""} ) a__ = field(default=_A , metadata={"""help""": """A csv or a json file containing the test data."""} ) def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" ) else: __magic_name__ = self.train_file.split(""".""" )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." __magic_name__ = self.validation_file.split(""".""" )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class UpperCAmelCase_ : '''simple docstring''' a__ = field( default=_A , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a__ = field( default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a__ = field( default=_A , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a__ = field( default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a__ = field( default=_A , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) a__ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) a__ = field( default=_A , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def a__ ( ): '''simple docstring''' __magic_name__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
__magic_name__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __magic_name__ = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], ) __magic_name__ = training_args.get_process_log_level() logger.setLevel(_lowerCAmelCase ) datasets.utils.logging.set_verbosity(_lowerCAmelCase ) transformers.utils.logging.set_verbosity(_lowerCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __magic_name__ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __magic_name__ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). 
# # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __magic_name__ = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. __magic_name__ = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: __magic_name__ = data_args.train_file.split(""".""" )[-1] __magic_name__ = data_args.test_file.split(""".""" )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." __magic_name__ = data_args.test_file else: raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" ) for key in data_files.keys(): logger.info(f'''load a local file for {key}: {data_files[key]}''' ) if data_args.train_file.endswith(""".csv""" ): # Loading a dataset from local csv files __magic_name__ = load_dataset("""csv""", data_files=_lowerCAmelCase, cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files __magic_name__ = load_dataset("""json""", data_files=_lowerCAmelCase, cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. 
# Labels __magic_name__ = raw_datasets["train"].features["label"].names __magic_name__ = len(_lowerCAmelCase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __magic_name__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=_lowerCAmelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # load tapex tokenizer __magic_name__ = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=_lowerCAmelCase, ) __magic_name__ = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path ), config=_lowerCAmelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Padding strategy if data_args.pad_to_max_length: __magic_name__ = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __magic_name__ = False # Some models have set the order of the labels to use, so let's make sure we do use it. __magic_name__ = {"Refused": 0, "Entailed": 1} __magic_name__ = {0: "Refused", 1: "Entailed"} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). 
Using max_seq_length={tokenizer.model_max_length}.''' ) __magic_name__ = min(data_args.max_seq_length, tokenizer.model_max_length ) def preprocess_tabfact_function(A_ ): # Tokenize the texts def _convert_table_text_to_pandas(A_ ): __magic_name__ = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )] __magic_name__ = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0] ) return _table_pd __magic_name__ = examples["statement"] __magic_name__ = list(map(_convert_table_text_to_pandas, examples["""table_text"""] ) ) __magic_name__ = tokenizer(_lowerCAmelCase, _lowerCAmelCase, padding=_lowerCAmelCase, max_length=_lowerCAmelCase, truncation=_lowerCAmelCase ) __magic_name__ = examples["label"] return result with training_args.main_process_first(desc="""dataset map pre-processing""" ): __magic_name__ = raw_datasets.map( _lowerCAmelCase, batched=_lowerCAmelCase, load_from_cache_file=not data_args.overwrite_cache, desc="""Running tokenizer on dataset""", ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) __magic_name__ = raw_datasets["train"] if data_args.max_train_samples is not None: __magic_name__ = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) __magic_name__ = raw_datasets["validation"] if data_args.max_eval_samples is not None: __magic_name__ = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("""--do_predict requires a test dataset""" ) __magic_name__ = raw_datasets["test"] if data_args.max_predict_samples is not None: __magic_name__ = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a 
few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(_lowerCAmelCase ) ), 3 ): logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(A_ ): __magic_name__ = p.predictions[0] if isinstance(p.predictions, _lowerCAmelCase ) else p.predictions __magic_name__ = np.argmax(_lowerCAmelCase, axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: __magic_name__ = default_data_collator elif training_args.fpaa: __magic_name__ = DataCollatorWithPadding(_lowerCAmelCase, pad_to_multiple_of=8 ) else: __magic_name__ = None # Initialize our Trainer __magic_name__ = Trainer( model=_lowerCAmelCase, args=_lowerCAmelCase, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=_lowerCAmelCase, tokenizer=_lowerCAmelCase, data_collator=_lowerCAmelCase, ) # Training if training_args.do_train: __magic_name__ = None if training_args.resume_from_checkpoint is not None: __magic_name__ = training_args.resume_from_checkpoint elif last_checkpoint is not None: __magic_name__ = last_checkpoint __magic_name__ = trainer.train(resume_from_checkpoint=_lowerCAmelCase ) __magic_name__ = train_result.metrics __magic_name__ = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCAmelCase ) ) __magic_name__ = min(_lowerCAmelCase, len(_lowerCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""", _lowerCAmelCase ) trainer.save_metrics("""train""", _lowerCAmelCase ) 
trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __magic_name__ = trainer.evaluate(eval_dataset=_lowerCAmelCase ) __magic_name__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCAmelCase ) __magic_name__ = min(_lowerCAmelCase, len(_lowerCAmelCase ) ) trainer.log_metrics("""eval""", _lowerCAmelCase ) trainer.save_metrics("""eval""", _lowerCAmelCase ) if training_args.do_predict: logger.info("""*** Predict ***""" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. __magic_name__ = predict_dataset.remove_columns("""label""" ) __magic_name__ = trainer.predict(_lowerCAmelCase, metric_key_prefix="""predict""" ).predictions __magic_name__ = np.argmax(_lowerCAmelCase, axis=1 ) __magic_name__ = os.path.join(training_args.output_dir, """predict_results_tabfact.txt""" ) if trainer.is_world_process_zero(): with open(_lowerCAmelCase, """w""" ) as writer: logger.info("""***** Predict Results *****""" ) writer.write("""index\tprediction\n""" ) for index, item in enumerate(_lowerCAmelCase ): __magic_name__ = label_list[item] writer.write(f'''{index}\t{item}\n''' ) __magic_name__ = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if training_args.push_to_hub: trainer.push_to_hub(**_lowerCAmelCase ) else: trainer.create_model_card(**_lowerCAmelCase ) def a__ ( A_ ): '''simple docstring''' main() if __name__ == "__main__": main()
711
import os import sys __lowerCAmelCase : Optional[Any] = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) __lowerCAmelCase : Union[str, Any] = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoConfig.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoTokenizer.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoTokenizer.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModel.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModel.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*A_, **A_ )
76
0
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : str=13 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Optional[Any]=224 , UpperCamelCase__ : str=30 , UpperCamelCase__ : Optional[Any]=400 , UpperCamelCase__ : Any=True , UpperCamelCase__ : str=None , UpperCamelCase__ : int=True , UpperCamelCase__ : Any=[0.5, 0.5, 0.5] , UpperCamelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , ) -> Union[str, Any]: """simple docstring""" __magic_name__ = size if size is not None else {"""height""": 18, """width""": 18} __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = num_channels __magic_name__ = image_size __magic_name__ = min_resolution __magic_name__ = max_resolution __magic_name__ = do_resize __magic_name__ = size __magic_name__ = do_normalize __magic_name__ = image_mean __magic_name__ = image_std def _lowercase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCAmelCase_ ( UpperCamelCase_ , unittest.TestCase ): '''simple docstring''' a__ = ViTImageProcessor if is_vision_available() else None def _lowercase ( self : int ) -> Dict: """simple docstring""" __magic_name__ = EfficientFormerImageProcessorTester(self ) @property def _lowercase ( self : str ) -> List[str]: """simple docstring""" return 
self.image_proc_tester.prepare_image_processor_dict() def _lowercase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , """image_mean""" ) ) self.assertTrue(hasattr(_a , """image_std""" ) ) self.assertTrue(hasattr(_a , """do_normalize""" ) ) self.assertTrue(hasattr(_a , """do_resize""" ) ) self.assertTrue(hasattr(_a , """size""" ) ) def _lowercase ( self : Any ) -> Dict: """simple docstring""" pass def _lowercase ( self : str ) -> Dict: """simple docstring""" __magic_name__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __magic_name__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input __magic_name__ = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched __magic_name__ = image_processor(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def _lowercase ( self : List[Any] ) -> Any: """simple docstring""" __magic_name__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __magic_name__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input __magic_name__ = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, 
self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched __magic_name__ = image_processor(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def _lowercase ( self : List[Any] ) -> Dict: """simple docstring""" __magic_name__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __magic_name__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input __magic_name__ = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched __magic_name__ = image_processor(_a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
712
from typing import Dict from .base import GenericTensor, Pipeline class UpperCAmelCase_ ( _A ): '''simple docstring''' def _lowercase ( self : List[Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Dict ) -> str: """simple docstring""" if tokenize_kwargs is None: __magic_name__ = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" ) __magic_name__ = truncation __magic_name__ = tokenize_kwargs __magic_name__ = {} if return_tensors is not None: __magic_name__ = return_tensors return preprocess_params, {}, postprocess_params def _lowercase ( self : int , UpperCamelCase__ : int , **UpperCamelCase__ : int ) -> Dict[str, GenericTensor]: """simple docstring""" __magic_name__ = self.framework __magic_name__ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) return model_inputs def _lowercase ( self : str , UpperCamelCase__ : Dict ) -> str: """simple docstring""" __magic_name__ = self.model(**UpperCamelCase__ ) return model_outputs def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=False ) -> List[str]: """simple docstring""" if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : List[str] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[Any] ) -> Dict: """simple docstring""" return super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
76
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __lowerCAmelCase : List[str] = { 'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Tuple = [ 'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST', 'FalconForCausalLM', 'FalconModel', 'FalconPreTrainedModel', 'FalconForSequenceClassification', 'FalconForTokenClassification', 'FalconForQuestionAnswering', ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys __lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
713
import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel


# Official checkpoints: download URL plus the audio geometry each was trained on.
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}


def alpha_sigma_to_t(alpha, sigma):
    """Map an (alpha, sigma) noise pair to the model's continuous timestep.

    Fixed: the original called nonexistent ``torch.atana``; the intended
    function is ``torch.atan2``.
    """
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    """Convert a linear schedule ``t`` to the "crash" schedule used at training time."""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    """Bare attribute container used as a stand-in config object."""

    pass


class DiffusionUncond(nn.Module):
    """Wrapper matching the original checkpoint layout (diffusion net + EMA copy)."""

    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    """Fetch an official checkpoint into the working directory and return its path."""
    url = MODELS_MAP[model_name]["url"]
    # One-off conversion script; shelling out to wget is acceptable here.
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"


# Original layer index -> diffusers sub-layer name, per block family.
DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}

RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}

ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}


def convert_resconv_naming(name):
    """Rename one parameter inside a ResConvBlock to its diffusers name."""
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    """Rename one attention parameter; fused qkv weights map to a *list* of names."""
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")


def rename(input_string, max_depth=13):
    """Translate one original state-dict key into its diffusers counterpart.

    Returns a string, or a list of strings when a fused qkv weight has to be
    split into three separate parameters.
    """
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    # Each "main.7." prefix descends one U-Net level.
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)

    new_string_left = string_left
    if not isinstance(new_string_left, list):
        new_string = prefix + "." + new_layer + "." + new_string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in new_string_left]
    return new_string


def rename_orig_weights(state_dict):
    """Rename every key of the original state dict to its diffusers name."""
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    """Split a fused conv qkv tensor into the separate linear q/k/v parameters."""
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight: drop the conv kernel dimension
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices: first axis is 3 * single projection size
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict


def main(args):
    """Convert an original dance-diffusion checkpoint into a DanceDiffusionPipeline
    and verify the two models produce (near-)identical audio.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    # Use the EMA weights — they are what the released checkpoints are evaluated with.
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    main(args)
76
0
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main():
    """Generate a 1024-bit RSA key pair and write it to ``rsa_*.txt`` files."""
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size):
    """Return an RSA key pair ``((n, e), (n, d))`` with a ``key_size``-bit modulus factor.

    Restored: ``p``, ``q``, ``e`` and ``d`` were never bound in the mangled
    original, so ``p * q`` raised NameError.
    """
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        # Draw candidates until one is coprime with phi(n).
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name, key_size):
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``, refusing to overwrite."""
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
714
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration for a LiLT model (BERT-style text encoder plus a 2D layout stream).

    Restored: the mangled original gave every ``__init__`` parameter the same
    name (a SyntaxError) and never assigned the ``self.*`` attributes.
    """

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        # Layout-stream-specific knobs: hidden-size reduction for the 2D
        # channel and the number of 2D position embeddings.
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
76
0
import copy
from collections import OrderedDict
from typing import Dict, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration for a DETR model (backbone + transformer encoder/decoder).

    Restored: base class (was an undefined mangled name), duplicated
    parameter names, and the ``self.*`` attribute assignments.
    """

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Alternate constructor from a pre-built backbone configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


# Renamed from the duplicate mangled class name, which silently shadowed the
# model config above.
class DetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for DETR."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
715
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class UpperCAmelCase_:
    """Mixin of serialization round-trip tests shared by feature-extractor test classes.

    Concrete test classes provide ``feature_extraction_class`` and
    ``feat_extract_dict``.  Restored: the mangled original named every test
    method identically, so only the last one survived.
    """

    # Overridden by the concrete test class with the extractor under test.
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
76
0
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_numpy,
    nightly,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class UpperCAmelCase_(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for LDMTextToImagePipeline with tiny dummy components."""

    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # NOTE(review): the original attribute name was mangled away; reconstructed
    # as the mixin's cpu-offload switch — confirm against PipelineTesterMixin.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build tiny randomly-initialized components for a fast pipeline test."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    """Slow GPU tests against the released CompVis checkpoint (few steps)."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        # Fixed numpy latents keep the expected slice reproducible across runs.
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    """Nightly full-length generation compared against a stored reference image."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
716
from ..utils import DummyObject, requires_backends


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder object that raises an informative ImportError whenever it is
    instantiated or its constructors are called while `note_seq` is missing.

    Restored: metaclass was an undefined mangled name, the backends class
    attribute lost its ``_backends`` name, and both classmethods collided on
    the same mangled identifier.
    """

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
76
0
def a__(number: int) -> int:
    """Return the 1-based position of the most significant set bit of ``number``.

    Returns 0 for an input of 0.

    Raises:
        TypeError: if ``number`` is not an ``int``.

    Fixed: the original compared ``isinstance(x, x)`` (always a TypeError at
    runtime) and never bound the ``position`` accumulator.
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    # Count right-shifts until the value is exhausted; that count is the
    # highest set bit's position.  NOTE(review): a negative input would loop
    # forever (arithmetic shift preserves the sign bit) — callers pass >= 0.
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
717
def reverse_long_words(sentence: str) -> str:
    """Reverse every word of ``sentence`` that is longer than 4 characters.

    Fixed: the function was defined under a mangled name while the ``__main__``
    block calls ``reverse_long_words``, and the length test referenced the
    (mangled) parameter instead of each word.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
76
0
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class UpperCAmelCase_(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR scene-text recognition.

    Restored: base class (was an undefined mangled name), the tokenizer class
    attributes, and the duplicated ``_lowercase`` method names which clobbered
    each other — they are restored to the hook names ``PreTrainedTokenizer``
    actually calls.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # Reverse mapping id -> token for decoding.
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split ``text`` into single characters."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary JSON into ``save_directory`` and return its path."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            # NOTE(review): sort_keys/ensure_ascii values reconstructed from the
            # upstream tokenizer convention — confirm.
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
718
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class UpperCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for Funnel (WordPiece with a leading <cls> token type).

    Restored: mixin base class, the class attributes the mixin reads, and
    distinct method names (the mangled original defined every method under
    the same name).
    """

    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        # NOTE(review): do_lower_case value reconstructed — confirm.
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            # Funnel marks the leading <cls> with token type 2.
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
76
0
def a__ ( A_ ): '''simple docstring''' __magic_name__ = 1 for i in range(1, num + 1 ): fact *= i return fact def a__ ( A_ ): '''simple docstring''' __magic_name__ = 0 while number > 0: __magic_name__ = number % 10 sum_of_digits += last_digit __magic_name__ = number // 10 # Removing the last_digit from the given number return sum_of_digits def a__ ( A_ = 100 ): '''simple docstring''' __magic_name__ = factorial(_A ) __magic_name__ = split_and_add(_A ) return result if __name__ == "__main__": print(solution(int(input('Enter the Number: ').strip())))
719
from collections import deque from .hash_table import HashTable class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : int , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[Any] ) -> Optional[Any]: """simple docstring""" super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Dict: """simple docstring""" __magic_name__ = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(UpperCamelCase__ ) __magic_name__ = self.values[key] def _lowercase ( self : List[str] ) -> int: """simple docstring""" return ( sum(self.charge_factor - len(UpperCamelCase__ ) for slot in self.values ) / self.size_table * self.charge_factor ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ) -> str: """simple docstring""" if not ( len(self.values[key] ) == self.charge_factor and self.values.count(UpperCamelCase__ ) == 0 ): return key return super()._collision_resolution(UpperCamelCase__ , UpperCamelCase__ )
76
0
import time
from contextlib import contextmanager
from pathlib import Path

import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder


# Credentials for the dummy user on the CI hub instance.
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
# NOTE(review): the final template component had been replaced by the literal
# "(unknown)" by extraction tooling; "{filename}" matches the
# huggingface_hub URL template this fixture patches — confirm upstream.
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()

# NOTE(review): all fixtures below had been renamed to ``a__`` (pytest
# resolves fixtures by function name, so all but the last were lost) and
# their parameters to ``A_`` while the bodies referenced the original names
# (``monkeypatch``, ``hf_api``, ``repo_id``, ...). Names restored; fixtures
# ``text_file`` / ``zip_csv_with_dir_path`` / ``zip_image_path`` come from
# sibling conftest modules — confirm against the rest of the test suite.


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    """Point huggingface_hub's download URL template at the CI hub."""
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    """Point datasets' endpoint configuration at the CI hub."""
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    """Store the HfFolder token under a CI-specific path."""
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    """Log the dummy CI user in for the duration of a test."""
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    """Session-wide HfApi client bound to the CI hub endpoint."""
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api):
    """Install the CI token for the session, restoring any pre-existing one."""
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    """Return a callable that deletes a dataset repo on the CI hub."""

    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    """Context manager that deletes the given repo on exit, even on failure."""

    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    """Create a private dataset repo containing one text file; delete it after."""
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_csv_with_dir_path):
    """Create a private dataset repo containing one zipped text payload."""
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_path):
    """Create a private dataset repo containing one zipped image payload."""
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
720
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_, AssumeRolePolicyDocument=json.dumps(A_, indent=2 ) ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", 
"""logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(A_, indent=2 ), ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'''role {role_name} already exists. Using existing one''' ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def a__ ( ): '''simple docstring''' __magic_name__ = _ask_options( """How do you want to authorize?""", ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """], A_, ) __magic_name__ = None if credentials_configuration == 0: __magic_name__ = _ask_field("""Enter your AWS Profile name: [default] """, default="""default""" ) __magic_name__ = aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __magic_name__ = _ask_field("""AWS Access Key ID: """ ) __magic_name__ = aws_access_key_id __magic_name__ = _ask_field("""AWS Secret Access Key: """ ) __magic_name__ = aws_secret_access_key __magic_name__ = _ask_field("""Enter your AWS Region: [us-east-1]""", default="""us-east-1""" ) __magic_name__ = aws_region __magic_name__ = _ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""", ["""Provide IAM Role name""", """Create new IAM role using credentials"""], A_, ) if role_management == 0: __magic_name__ = _ask_field("""Enter your IAM role name: """ ) else: __magic_name__ = """accelerate_sagemaker_execution_role""" print(f'''Accelerate will create an iam role "{iam_role_name}" 
using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __magic_name__ = _ask_field( """Do you want to use custom Docker image? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_custom_docker_image: __magic_name__ = _ask_field("""Enter your Docker image: """, lambda A_ : str(A_ ).lower() ) __magic_name__ = _ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_inputs_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_metrics_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_options( """What is the distributed mode?""", ["""No distributed training""", """Data parallelism"""], _convert_sagemaker_distributed_mode, ) __magic_name__ = {} __magic_name__ = _ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""", _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_dynamo: __magic_name__ = """dynamo_""" __magic_name__ = _ask_options( """Which dynamo backend would you like to use?""", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) __magic_name__ = _ask_field( """Do you want to customize the defaults sent to torch.compile? 
[yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_custom_options: __magic_name__ = _ask_options( """Which mode do you want to use?""", A_, lambda A_ : TORCH_DYNAMO_MODES[int(A_ )], default="""default""", ) __magic_name__ = _ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = _ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = """Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __magic_name__ = _ask_options( A_, A_, lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __magic_name__ = _ask_field(A_, lambda A_ : str(A_ ).lower(), default="""ml.p3.2xlarge""" ) __magic_name__ = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __magic_name__ = _ask_field( """How many machines do you want use? [1]: """, A_, default=1, ) __magic_name__ = _ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""", ["""no""", """fp16""", """bf16""", """fp8"""], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=A_, use_cpu=A_, dynamo_config=A_, eca_instance_type=A_, profile=A_, region=A_, iam_role_name=A_, mixed_precision=A_, num_machines=A_, sagemaker_inputs_file=A_, sagemaker_metrics_file=A_, )
76
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class UpperCAmelCase_(PretrainedConfig):
    """RoFormer model configuration.

    NOTE(review): restored from an obfuscation pass — ``super().__init__``
    referenced the undefined name ``_lowercase`` instead of
    ``pad_token_id``/``**kwargs``, every attribute assignment was collapsed
    to ``__magic_name__``, and the base class was the undefined
    ``SCREAMING_SNAKE_CASE__`` (the file imports ``PretrainedConfig``).
    """

    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        # The embedding size defaults to the hidden size when not given.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoFormer.

    NOTE(review): the original class was also named ``UpperCAmelCase_`` and
    silently shadowed the config class above; renamed to the conventional
    ``<Model>OnnxConfig``.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # NOTE(review): this unconditional overwrite makes the branch above
        # dead code; preserved as-is from the source to keep behavior, but
        # it looks like an upstream bug worth confirming.
        dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
721
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class UpperCAmelCase_(BaseImageProcessor):
    """CLIP-style image processor: optional RGB-convert, resize to a shortest
    edge, center-crop, rescale, and normalize.

    NOTE(review): restored from an obfuscation pass that gave every method
    duplicate ``UpperCamelCase__`` parameters (a SyntaxError), collapsed all
    ``self.*`` attribute assignments to the local ``__magic_name__`` (so no
    configuration was ever stored), and used the undefined base ``_A``.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        # Shortest-edge sizes are non-square; crop sizes are square by default
        # (the original boolean arguments were undefined placeholders — these
        # follow the upstream convention; confirm).
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the image's shortest edge matches ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here resolves to the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize per channel: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the full pipeline on one image or a list, returning a BatchFeature."""
        # Per-call arguments override the instance configuration.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
76
0
import argparse
import shlex

import runhouse as rh


if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup
    # for cloud access setup instructions, if using on-demand hardware.
    # BYO cluster:      --user <user> --host <host> --key_path <key_path> <example> <args>
    # On-demand cluster: --instance <instance> --provider <provider> <example> <args>
    # Passing both BYO and on-demand args is an error; otherwise defaults apply.
    #
    # Fix: the original assigned the parser, parsed args, and cluster handle
    # to the placeholder name ``__lowerCAmelCase`` while every later line
    # referenced ``parser``/``args``/``unknown``/``cluster``/``example_dir``,
    # all undefined — a guaranteed NameError.
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()

    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
700
import unittest

from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class NystromformerModelTester:
    """Builds tiny Nystromformer configs/inputs and runs shape checks for each model head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random input tensors (ids, mask, token types, labels) and a matching config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three call signatures the base model supports.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate each example across the choice dimension: (batch, seq) -> (batch, num_choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        # Position 2 is the [MASK] token in the encoded sentence.
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
76
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    """
    Configuration class for the TrOCR decoder. Stores the hyper-parameters of the
    decoder-only Transformer used on top of a vision encoder for text recognition.
    Defaults correspond to the `microsoft/trocr-base-handwritten` checkpoint.
    """

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Maps common config attribute names to the decoder-specific ones used here.
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
701
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    """
    Configuration class for the CvT (Convolutional vision Transformer) model.
    Most hyper-parameters are per-stage lists of length 3 (one entry per stage).
    Defaults correspond to the `microsoft/cvt-13` checkpoint.
    """

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
76
0
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """
    Stops generation once the number of generated tokens (prompt included) reaches `max_length`.
    Optionally warns when the model's `max_position_embeddings` is about to be exceeded.
    """

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """
    Deprecated: stops once `start_length + max_new_tokens` tokens have been generated.
    Use `MaxLengthCriteria` instead.
    """

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """
    Stops generation when more than `max_time` seconds have elapsed since `initial_timestamp`
    (defaults to the time this criterion was created).
    """

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    """A list of stopping criteria; generation stops as soon as any criterion fires."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        # Return the first length-based limit found, if any.
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    """
    Return a copy of `stopping_criteria` guaranteed to enforce `max_length`, warning if
    the list already carries a conflicting length limit.
    """
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
702
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: maps submodule name -> public names it exports.
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only exposed when torch is installed.
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
76
0
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """
    Approximate sin(theta) with a Maclaurin series of `accuracy` terms.

    Args:
        theta: angle in radians (int or float).
        accuracy: number of series terms; must be a positive int.

    Raises:
        ValueError: if `theta` is not numeric or `accuracy` is not a positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)

    # Range-reduce theta into [-2*pi, 2*pi] so the series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi

    # sin(x) = sum_{r>=0} (-1)^r * x^(2r+1) / (2r+1)!
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """
    Approximate cos(theta) with a Maclaurin series of `accuracy` terms.

    Args:
        theta: angle in radians (int or float).
        accuracy: number of series terms; must be a positive int.

    Raises:
        ValueError: if `theta` is not numeric or `accuracy` is not a positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)

    # Range-reduce theta into [-2*pi, 2*pi] so the series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi

    # cos(x) = sum_{r>=0} (-1)^r * x^(2r) / (2r)!
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))

    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
703
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    """Load the base model and copy the s3prl sequence-classification head weights into it."""
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load the base model and copy the s3prl diarization (frame classification) head weights."""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load the base model and copy the s3prl x-vector (speaker verification) head weights."""
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Convert an s3prl downstream checkpoint to a Hugging Face model and save model +
    feature extractor to `model_dump_path`. The head type is selected from the
    `architectures` entry of the config.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
76
0
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class MultiCPUTester(unittest.TestCase):
    """Runs the shared accelerate test scripts through the debug launcher on CPU.

    Note: method names must start with `test_` for unittest discovery; using
    duplicate private names would silently drop one of the tests.
    """

    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
704
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    """Assert the dataset read from the text fixture has the expected shape and dtypes."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    # keep_in_memory=True should allocate Arrow memory; otherwise it must not grow.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Assert every split of the DatasetDict has the expected shape and dtypes."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
76
0
import os
import string
import sys


# Flag OR-ed into arrow key codes so they don't collide with ASCII values.
ARROW_KEY_FLAG = 1 << 8

# Key name -> key code. Name restored from the references inside the
# functions below (the obfuscated source bound it to a junk name).
KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),  # raw terminal mode: Enter sends \r
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

# FIX: get_character() reads KEYMAP["arrow_begin"] / KEYMAP["arrow_end"], but
# the obfuscated source assigned KEYMAP['up'] / KEYMAP['left'] to throwaway
# module variables instead — a guaranteed KeyError on any escape sequence.
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    # FIX: both names are read by get_raw_chars(); the obfuscated source
    # bound these literals to junk variables, leaving them undefined.
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

# Digits map to their own ordinals.
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    """Read one raw character (or buffered Windows key part) from stdin.

    Names restored throughout: the obfuscated version collapsed `ch`,
    `ch2`, `chx` and the buffer into undefined identifiers (NameError).
    """
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    # NOTE: `- 1 << 9` binds as `(code - 1) << 9` — kept as in
                    # the original upstream code.
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            # Always restore the terminal, even if the read raises.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Return the next key press, translating escape sequences to arrow codes.

    Name restored: the obfuscated source defined both functions as `a__`, so
    this one shadowed the reader it calls.
    """
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
705
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class UpperCAmelCase_(BaseImageProcessor):
    """Image processor: shortest-edge resize -> center crop -> rescale -> normalize.

    FIXES vs. the obfuscated source: the base class `_A` was undefined
    (BaseImageProcessor is what this file imports); all five methods were
    named `_lowercase` so the later defs shadowed the earlier ones and
    `preprocess`'s `self.resize(...)` etc. raised AttributeError; the
    constructor never assigned the `self.*` attributes `preprocess` reads;
    duplicated parameter names were a SyntaxError.
    """

    # NOTE(review): originally the `model_input_names` class attribute
    # expected by BaseImageProcessor — confirm against the base class.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        # NOTE(review): obfuscation destroyed this keyword's value; False
        # matches the shortest-edge semantics enforced in resize() — confirm.
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches size["shortest_edge"]."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel mean/std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> Dict:
        """Run the configured pipeline over one image or a batch of images."""
        # Per-call overrides fall back to the instance configuration.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
76
0
import operator as op __lowerCAmelCase : List[Any] = 'scaler.pt' __lowerCAmelCase : int = 'pytorch_model' __lowerCAmelCase : Tuple = 'random_states' __lowerCAmelCase : List[str] = 'optimizer' __lowerCAmelCase : Optional[Any] = 'scheduler' __lowerCAmelCase : List[str] = 'pytorch_model.bin' __lowerCAmelCase : str = 'pytorch_model.bin.index.json' __lowerCAmelCase : Dict = 'model.safetensors' __lowerCAmelCase : Optional[Any] = 'model.safetensors.index.json' __lowerCAmelCase : int = '1.10.2' __lowerCAmelCase : int = 'py38' __lowerCAmelCase : str = '4.17.0' __lowerCAmelCase : List[Any] = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge'] __lowerCAmelCase : Optional[int] = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2'] __lowerCAmelCase : Tuple = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP'] __lowerCAmelCase : int = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH'] __lowerCAmelCase : List[str] = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] __lowerCAmelCase : Union[str, Any] = '2.0.1' __lowerCAmelCase : Tuple = ['pdsh', 'standard', 'openmpi', 'mvapich'] __lowerCAmelCase : Any = ['default', 'reduce-overhead', 'max-autotune'] __lowerCAmelCase : List[Any] = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 __lowerCAmelCase : List[Any] = [ 'nnodes', 'nproc_per_node', 'rdzv_backend', 'rdzv_endpoint', 'rdzv_id', 'rdzv_conf', 'standalone', 'max_restarts', 'monitor_interval', 'start_method', 'role', 'module', 'm', 'no_python', 'run_path', 'log_dir', 'r', 'redirects', 't', 'tee', 'node_rank', 'master_addr', 'master_port', ] __lowerCAmelCase : Optional[int] = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM'] __lowerCAmelCase : Optional[Any] = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
706
import math


def insertion_sort(array, start=0, end=0):
    """Sort array[start:end] in place by insertion sort; return the array.

    All function names in this module are restored from their call sites —
    the obfuscated source named every function `a__`, so every internal call
    (heapify, heap_sort, partition, intro_sort, insertion_sort, sort) was a
    NameError.
    """
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements right until the insertion point is found.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):  # Max Heap
    """Sift array[index] down to restore the max-heap property."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[largest], array[index] = array[index], array[largest]
        heapify(array, largest, heap_size)


def heap_sort(array):
    """In-place heap sort; returns the array."""
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_3(array, first_index, middle_index, last_index):
    """Return the median of three sampled elements (pivot selection)."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    """Hoare-style partition around `pivot`; returns the split index."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    """Introsort entry point: quicksort with heap-sort depth fallback and
    insertion sort for small ranges. Returns the (in-place sorted) array."""
    if len(array) == 0:
        return array
    # FIX: obfuscated source called the nonexistent `math.loga`; the depth
    # bound for introsort is 2 * ceil(log2(n)).
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    """Recursive introsort over array[start:end]."""
    while end - start > size_threshold:
        if max_depth == 0:
            # Too deep: fall back to heap sort to guarantee O(n log n).
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    __lowerCAmelCase : str = input('Enter numbers separated by a comma : ').strip()
    __lowerCAmelCase : List[Any] = [float(item) for item in user_input.split(',')]
    print(sort(unsorted))
76
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): constant name reconstructed by convention; the obfuscated
# source bound this map to a throwaway identifier.
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'andreasmadsen/efficient_mlm_m0.40': (
        'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    """Configuration for RoBERTa-PreLayerNorm models.

    FIXES vs. the obfuscated source: the base class `_A` was undefined
    (this file imports PretrainedConfig); both classes shared the name
    `UpperCAmelCase_` so the second shadowed the first — distinct names
    restored; `super().__init__` was passed a module-level junk name
    instead of the token-id parameters; duplicated parameter names were a
    SyntaxError, so keyword names are restored.
    """

    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # FIX: forward the actual token ids instead of an unrelated module
        # variable.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    """ONNX export configuration (dynamic axes for the model inputs)."""

    # NOTE(review): property name `inputs` reconstructed from the OnnxConfig
    # interface convention — confirm against the base class.
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
707
import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetVaConfig,
    MobileNetVaForImageClassification,
    MobileNetVaImageProcessor,
    load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_va_config(model_name):
    """Build a MobileNetV1 config from a name like 'mobilenet_v1_1.0_224'.

    Function and variable names restored from their call sites; the
    obfuscated source defined this as `a__` while `convert` called
    `get_mobilenet_va_config`, and never assigned the config attributes that
    are read later (e.g. `config.image_size`).
    """
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    """Download the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a TF MobileNetV1 checkpoint to HF format and sanity-check it."""
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='mobilenet_v1_1.0_224',
        type=str,
        help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
    )
    parser.add_argument(
        '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
76
0
from heapq import heappop, heappush import numpy as np def a__ ( A_, A_, A_, A_, ): '''simple docstring''' __magic_name__ = grid.shape __magic_name__ = [-1, 1, 0, 0] __magic_name__ = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] __magic_name__ = [(0, source)], set() __magic_name__ = np.full((rows, cols), np.inf ) __magic_name__ = 0 __magic_name__ = np.empty((rows, cols), dtype=UpperCAmelCase__ ) __magic_name__ = None while queue: (__magic_name__) = heappop(UpperCAmelCase__ ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: __magic_name__ = [] while (x, y) != source: path.append((x, y) ) __magic_name__ = predecessors[x, y] path.append(UpperCAmelCase__ ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(UpperCAmelCase__ ) ): __magic_name__ = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: __magic_name__ = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(UpperCAmelCase__, (dist + 1, (nx, ny)) ) __magic_name__ = dist + 1 __magic_name__ = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
708
import collections
import importlib.util
import os
import re
from pathlib import Path


# All constant and function names in this module are restored from their own
# call sites: the obfuscated source bound every regex to a throwaway name and
# defined every function as `a__`, while the bodies referenced `_re_backend`,
# `find_backend`, `parse_init`, etc. — guaranteed NameErrors.
PATH_TO_TRANSFORMERS = 'src/transformers'
# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_re_try = re.compile(r'^\s*try:')
# Catches a line with else:
_re_else = re.compile(r'^\s*else:')


def find_backend(line):
    """Return the normalized backend name for an `if not is_x_available()` line.

    Multiple backends on one line are sorted and joined with `_and_`;
    returns None for any other line.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Parse one __init__.py into its two halves.

    Returns (import_dict_objects, type_hint_objects): two dicts mapping a
    backend name (or "none") to the object names declared there, or None if
    the file has no `_import_structure`.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}

    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}

    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two init halves; return a list of human-readable errors."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''')

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f'''{key} backend'''
            errors.append(f'''Differences for {name}:''')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f'''  {a} in TYPE_HINT but not in _import_structure.''')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f'''  {a} in _import_structure but not in TYPE_HINT.''')
    return errors


def check_all_inits():
    """Walk the package tree and raise if any __init__.py halves disagree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the dotted names of all package submodules on disk."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    'convert_pytorch_checkpoint_to_tf2',
    'modeling_flax_pytorch_utils',
]


def check_submodules():
    """Raise if any on-disk submodule is missing from `_import_structure`."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f'''- {module}''' for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f'''{list_of_modules}\n'''
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
76
0
"""Pre-tokenize a dataset with a trained tokenizer and push the result to the Hub."""
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    """Tokenize one example's ``content`` field.

    Returns a dict with the token ids and the characters-per-token ratio
    (a rough measure of how well the tokenizer compresses this sample).
    """
    output = {}
    # No truncation: we need the full token count for the ratio below.
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core.
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
# Map in parallel; drop all metadata columns so only the tokenized fields remain.
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
709
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    """Configuration class for a SEW-D model.

    Holds every hyper-parameter of the architecture: the convolutional
    feature extractor, the disentangled-attention transformer encoder,
    SpecAugment masking, and the CTC / sequence-classification heads.
    Instantiating with no arguments yields the default configuration.
    """

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Feature-extractor (1D conv stack) parameters.
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        # Transformer encoder parameters (DeBERTa-style disentangled attention).
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv lists describe the same layers and must agree in length.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # Fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # CTC loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # Sequence classification head
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        """Total downsampling factor of the conv stack (product of strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
76
0
"""Create a smaller "student" seq2seq model by copying selected teacher layers."""
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    """Copy the teacher layers at indices ``layers_to_copy`` into ``dest_layers``."""
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    """Return the hardcoded layer mapping, or fall back to the first n_student layers."""
    try:
        return LAYERS_TO_COPY[n_teacher][n_student]
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    """Used or the --supervise_forward kwarg: which teacher layers supervise the student."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Make a student with ``e`` encoder and ``d`` decoder layers initialized from the teacher.

    Returns the student model plus the encoder/decoder teacher-layer indices that were copied.
    """
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5 configs name the layer counts differently
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict: this copies the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about the copying for easier reproducibility.
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
710
"""Train a single-weight, single-neuron network toward a target output."""
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return sigmoid(value); if ``deriv``, return the derivative instead.

    When ``deriv`` is True, ``value`` is assumed to already be a sigmoid
    output, so the derivative is simply ``value * (1 - value)``.
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Fixed input value fed through the single weight (also used as learning rate).
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Run gradient-descent so the network output converges toward ``expected``.

    ``expected`` is given on a 0-100 scale; the return value is on the same
    scale.

    >>> random.seed(0)
    >>> res = forward_propagation(32, 450_000)
    >>> 31 < res < 33
    True
    """
    # Random odd starting weight in [1, 199].
    weight = float(2 * random.randint(1, 100) - 1)

    layer_1 = 0.0
    for _ in range(number_propagations):
        # Forward propagation.
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss? (target rescaled to [0, 1])
        layer_1_error = (expected / 100) - layer_1
        # Error delta: scale the error by the sigmoid's slope at layer_1.
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight.
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
76
0
# Classic Python quine: a program whose output is its own source code.
# Applying the template string to itself via %-formatting re-quotes it (%r)
# and collapses %% back to a single %, reproducing the original one-liner.
_template = 'print((lambda quine: quine %% quine)(%r))'
print((lambda quine: quine % quine)(_template))
711
"""Torch Hub entry points exposing the Transformers ``Auto*`` factories."""
import os
import sys


# Make the in-repo `src` package importable before importing transformers.
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


# Packages torch.hub must install alongside this repo.
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


# Each hub entry point simply forwards to the matching Auto class's
# `from_pretrained`; `add_start_docstrings` attaches the Auto class docs.
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
76
0
"""Demonstrate classic fuzzy-set operations with scikit-fuzzy and plot them."""
import numpy as np
import skfuzzy as fuzz


if __name__ == "__main__":
    # Create universe of discourse in Python using linspace().
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc). Here: triangular memberships.
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Constant membership functions used by the bounded operators.
    one = np.ones(75)
    zero = np.zeros((75,))

    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
712
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    """Pipeline returning the model's raw hidden states for a text input.

    No task-specific head is applied: the output is the (nested-list or
    tensor) hidden states of the base model.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        """Split user kwargs into preprocess / forward / postprocess params."""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        # No forward-time parameters for this pipeline.
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        """Tokenize the input text into framework-native tensors."""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the bare model; its first output holds the hidden states."""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        """Return the hidden states as tensors or plain nested lists."""
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        """Extract features for one text or a batch of texts."""
        return super().__call__(*args, **kwargs)
76
0
def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers in place via bead ("gravity") sort.

    Each value is a rod of beads; on every pass excess beads "fall" from a
    taller rod onto its right neighbor. ``len(sequence)`` passes suffice.

    Returns the (mutated) sequence for convenience.

    Raises:
        TypeError: if ``sequence`` contains anything other than
            non-negative integers.
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        # Compare adjacent rods, letting the surplus drop one position right.
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
713
"""Convert an original dance-diffusion checkpoint to the diffusers format and verify parity."""
import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel


MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}


def alpha_sigma_to_t(alpha, sigma):
    """Return a timestep, given the scaling factors alpha and sigma."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    """Map a linear schedule to the "crash" schedule used by the original repo."""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    """Bare attribute container mimicking the original repo's config object."""

    pass


class DiffusionUncond(nn.Module):
    """Wrapper matching the original checkpoint layout (model + EMA copy)."""

    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    """Fetch an official checkpoint into the working directory; return its path."""
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"


# Original layer index -> diffusers sub-module name, per block position.
DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}

RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}
ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}


def convert_resconv_naming(name):
    """Rename a ResConvBlock parameter suffix to the diffusers naming."""
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    """Rename an attention parameter suffix; fused qkv maps to a list of names."""
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")


def rename(input_string, max_depth=13):
    """Translate one original state-dict key to its diffusers counterpart.

    Returns a string, or a list of strings when a fused qkv weight must be
    split across several diffusers keys.
    """
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    # Each "main.7." wrapper descends one level in the U-Net.
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # Layer number may be one or two digits.
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if isinstance(string_left, str):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string


def rename_orig_weights(state_dict):
    """Build a diffusers-keyed state dict from the original checkpoint's."""
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    """Split fused conv qkv weights into separate linear q/k/v entries."""
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight: drop the trailing conv dimension
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices: the first axis stacks q, k and v
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict


def main(args):
    """Convert, then compare original vs. converted sampling outputs."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    # The EMA weights are the ones actually used for sampling.
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    main(args)
76
0
"""Single-neuron, single-weight gradient-descent demo."""
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Compute sigmoid(value), or its derivative when ``deriv`` is True.

    For the derivative, ``value`` must already be a sigmoid output, so that
    sigmoid'(x) = s * (1 - s) with s = sigmoid(x).
    """
    return value * (1 - value) if deriv else 1 / (1 + math.exp(-value))


# Fixed network input (also reused as the learning rate below).
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Iteratively adjust one weight so the output approaches ``expected``/100.

    The result is reported back on the 0-100 scale.

    >>> random.seed(0)
    >>> 31 < forward_propagation(32, 450_000) < 33
    True
    """
    # Start from a random odd weight in [1, 199].
    weight = float(2 * random.randint(1, 100) - 1)
    layer_1 = 0.0

    for _ in range(number_propagations):
        # Forward pass through the single sigmoid unit.
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # Error against the rescaled target.
        layer_1_error = (expected / 100) - layer_1
        # Gradient step: error scaled by the sigmoid slope at the output.
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
714
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    """Configuration class for a LiLT (Language-independent Layout Transformer) model.

    Extends a RoBERTa-style text configuration with layout-specific fields:
    a 2D position embedding table and the channel shrink ratio used by the
    layout stream. Defaults yield the base-size architecture.
    """

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # Text-stream (RoBERTa-like) parameters.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        # Layout-stream parameters.
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
76
0
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpta import GPTaTokenizer


class TFGPTaTokenizer(tf.keras.layers.Layer):
    """In-graph GPT-2 tokenizer implemented as a Keras layer on top of
    keras-nlp's BytePairTokenizer, so tokenization can run inside a tf.function."""

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        """
        :param vocab: token -> id mapping of the BPE vocabulary.
        :param merges: BPE merge rules, one "tok_a tok_b" string per merge.
        :param max_length: optional fixed sequence length for padding/truncation.
        :param pad_token_id: id used to pad sequences up to ``max_length``.
        """
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPTaTokenizer, *args, **kwargs):
        """Build the layer from an existing (slow) GPT-2 tokenizer instance."""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        """Build the layer from a pretrained checkpoint name or local path."""
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        """Keras deserialization hook: rebuild from ``get_config()`` output."""
        return cls(**config)

    def get_config(self):
        """Keras serialization hook."""
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        """Tokenize a batch of strings; returns input_ids and attention_mask."""
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
715
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    """Save/load round-trip tests shared by all feature-extractor test classes.

    Subclasses must provide ``feature_extraction_class`` and ``feat_extract_dict``.
    """

    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        # to_json_string() must serialize every constructor argument faithfully.
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        # JSON file round-trip must reproduce an equivalent extractor.
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        # save_pretrained / from_pretrained round-trip must be lossless.
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        # The extractor must be constructible with defaults only.
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
76
0
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyVaaPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the Kandinsky 2.2 decoder pipeline using tiny random models."""

    pipeline_class = KandinskyVaaPipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """Tiny randomly-initialized UNet (seeded for determinism)."""
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        """Tiny randomly-initialized MoVQ decoder (seeded for determinism)."""
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # MPS has no per-device generator; fall back to the global one.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyVaaPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against real Kandinsky 2.2 checkpoints."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
716
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    """Placeholder object that raises a helpful error when the optional
    ``note_seq`` backend is not installed."""

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
76
0
from __future__ import annotations import math def a__ ( A_, A_ ) -> Dict: '''simple docstring''' if len(A_ ) != 2 or len(a[0] ) != 2 or len(A_ ) != 2 or len(b[0] ) != 2: raise Exception("""Matrices are not 2x2""" ) __magic_name__ = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def a__ ( A_, A_ ) -> Union[str, Any]: '''simple docstring''' return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(A_ ) ) ] def a__ ( A_, A_ ) -> List[str]: '''simple docstring''' return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(A_ ) ) ] def a__ ( A_ ) -> List[str]: '''simple docstring''' if len(A_ ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception("""Odd matrices are not supported!""" ) __magic_name__ = len(A_ ) __magic_name__ = matrix_length // 2 __magic_name__ = [[a[i][j] for j in range(A_, A_ )] for i in range(A_ )] __magic_name__ = [ [a[i][j] for j in range(A_, A_ )] for i in range(A_, A_ ) ] __magic_name__ = [[a[i][j] for j in range(A_ )] for i in range(A_ )] __magic_name__ = [[a[i][j] for j in range(A_ )] for i in range(A_, A_ )] return top_left, top_right, bot_left, bot_right def a__ ( A_ ) -> Optional[Any]: '''simple docstring''' return len(A_ ), len(matrix[0] ) def a__ ( A_ ) -> str: '''simple docstring''' print("""\n""".join(str(A_ ) for line in matrix ) ) def a__ ( A_, A_ ) -> List[Any]: '''simple docstring''' if matrix_dimensions(A_ ) == (2, 2): return default_matrix_multiplication(A_, A_ ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = split_matrix(A_ ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = split_matrix(A_ ) __magic_name__ = actual_strassen(A_, matrix_subtraction(A_, A_ ) ) __magic_name__ = actual_strassen(matrix_addition(A_, A_ ), A_ ) __magic_name__ = 
actual_strassen(matrix_addition(A_, A_ ), A_ ) __magic_name__ = actual_strassen(A_, matrix_subtraction(A_, A_ ) ) __magic_name__ = actual_strassen(matrix_addition(A_, A_ ), matrix_addition(A_, A_ ) ) __magic_name__ = actual_strassen(matrix_subtraction(A_, A_ ), matrix_addition(A_, A_ ) ) __magic_name__ = actual_strassen(matrix_subtraction(A_, A_ ), matrix_addition(A_, A_ ) ) __magic_name__ = matrix_addition(matrix_subtraction(matrix_addition(A_, A_ ), A_ ), A_ ) __magic_name__ = matrix_addition(A_, A_ ) __magic_name__ = matrix_addition(A_, A_ ) __magic_name__ = matrix_subtraction(matrix_subtraction(matrix_addition(A_, A_ ), A_ ), A_ ) # construct the new matrix from our 4 quadrants __magic_name__ = [] for i in range(len(A_ ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(A_ ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def a__ ( A_, A_ ) -> Any: '''simple docstring''' if matrix_dimensions(A_ )[1] != matrix_dimensions(A_ )[0]: __magic_name__ = ( """Unable to multiply these matrices, please check the dimensions.\n""" f'''Matrix A: {matrixa}\n''' f'''Matrix B: {matrixa}''' ) raise Exception(A_ ) __magic_name__ = matrix_dimensions(A_ ) __magic_name__ = matrix_dimensions(A_ ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] __magic_name__ = max(*A_, *A_ ) __magic_name__ = int(math.pow(2, math.ceil(math.loga(A_ ) ) ) ) __magic_name__ = matrixa __magic_name__ = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0, A_ ): if i < dimensiona[0]: for _ in range(dimensiona[1], A_ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1], A_ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) __magic_name__ = actual_strassen(A_, A_ ) # Removing the additional zeros for i in range(0, A_ ): if i < dimensiona[0]: for _ in range(dimensiona[1], A_ ): 
final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": __lowerCAmelCase : Tuple = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] __lowerCAmelCase : Any = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
717
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than 4 characters in ``sentence``.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    >>> reverse_long_words("")
    ''
    """
    # Only words strictly longer than 4 characters are reversed.
    return " ".join(word[::-1] if len(word) > 4 else word for word in sentence.split())


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
76
0
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the Shap-E image-to-image pipeline with tiny random models."""

    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        """Tiny randomly-initialized CLIP vision encoder (seeded)."""
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        """Tiny randomly-initialized prior transformer (seeded)."""
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        """Tiny randomly-initialized NeRF renderer (seeded)."""
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        # MPS has no per-device generator; fall back to the global one.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the real openai/shap-e-img2img checkpoint."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
718
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for Funnel Transformer (slow and fast implementations)."""

    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        # Minimal WordPiece vocabulary covering the test sentence.
        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            # Funnel uses token type 2 for the leading <cls> token.
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
76
0