Dataset schema (one row per example):

    code                     string   (lengths 86 to 54.5k characters)
    code_codestyle           int64    (0 to 371)
    style_context            string   (lengths 87 to 49.2k characters)
    style_context_codestyle  int64    (0 to 349)
    label                    int64    (0 or 1)
"""simple docstring""" import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _UpperCAmelCase ( _UpperCAmelCase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =None lowerCamelCase__ =BloomTokenizerFast lowerCamelCase__ =BloomTokenizerFast lowerCamelCase__ =True lowerCamelCase__ =False lowerCamelCase__ ='tokenizer_file' lowerCamelCase__ ={'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'} def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' super().setUp() __snake_case : Tuple = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE (self , **a_ ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = self.get_rust_tokenizer() __snake_case : Any = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""] __snake_case : Union[str, Any] = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]] __snake_case : Dict = tokenizer.batch_encode_plus(lowercase_ )["""input_ids"""] self.assertListEqual(lowercase_ , lowercase_ ) __snake_case : Union[str, Any] = tokenizer.batch_decode(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE (self , a_=6 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input __snake_case : Union[str, Any] = """This is a simple input""" __snake_case : Dict = ["""This is a simple input 1""", """This is a simple input 2"""] __snake_case : Any = ("""This is a simple input""", """This is a pair""") __snake_case : Optional[Any] = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests try: tokenizer_r.encode(lowercase_ , max_length=lowercase_ ) tokenizer_r.encode_plus(lowercase_ , max_length=lowercase_ ) tokenizer_r.batch_encode_plus(lowercase_ , max_length=lowercase_ ) tokenizer_r.encode(lowercase_ , max_length=lowercase_ ) tokenizer_r.batch_encode_plus(lowercase_ , max_length=lowercase_ ) except ValueError: self.fail('''Bloom Tokenizer should be able to deal with padding''' ) __snake_case : Optional[Any] = None # Hotfixing padding = None self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding='''max_length''' ) # Simple input self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding='''max_length''' ) # Simple input self.assertRaises( lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding='''max_length''' , ) # Pair input self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding='''max_length''' ) # Pair input self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding='''max_length''' ) # Pair input self.assertRaises( lowercase_ , tokenizer_r.batch_encode_plus 
, lowercase_ , max_length=lowercase_ , padding='''max_length''' , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.get_rust_tokenizer() __snake_case : Optional[int] = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=lowercase_ ) __snake_case : List[str] = next(iter(lowercase_ ) )["""premise"""] # pick up one data __snake_case : str = list(sample_data.values() ) __snake_case : Tuple = list(map(tokenizer.encode , lowercase_ ) ) __snake_case : List[str] = [tokenizer.decode(lowercase_ , clean_up_tokenization_spaces=lowercase_ ) for x in output_tokens] self.assertListEqual(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
"""simple docstring""" def lowercase ( _snake_case : int ) ->str: """simple docstring""" if number > 0: raise ValueError('''input must be a negative integer''' ) __snake_case : Any = len(bin(_snake_case )[3:] ) __snake_case : List[Any] = bin(abs(_snake_case ) - (1 << binary_number_length) )[3:] __snake_case : Dict = ( ( '''1''' + '''0''' * (binary_number_length - len(_snake_case )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" def lowercase ( _snake_case : list[int] ) ->Dict: """simple docstring""" __snake_case : int = [] if len(__a ) == 1: return [nums.copy()] for _ in range(len(__a ) ): __snake_case : List[Any] = nums.pop(0 ) __snake_case : Optional[int] = permute(__a ) for perm in permutations: perm.append(__a ) result.extend(__a ) nums.append(__a ) return result def lowercase ( _snake_case : List[Any] ) ->int: """simple docstring""" def backtrack(_snake_case : int ): if start == len(__a ) - 1: output.append(nums[:] ) else: for i in range(__a , len(__a ) ): __snake_case , __snake_case : List[Any] = nums[i], nums[start] backtrack(start + 1 ) __snake_case , __snake_case : int = nums[i], nums[start] # backtrack __snake_case : Optional[int] = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function SCREAMING_SNAKE_CASE : Dict = permutea([1, 2, 3]) print(res) doctest.testmod()
"""simple docstring""" def lowercase ( ) ->int: """simple docstring""" return [ a * b * (1_000 - a - b) for a in range(1 , 999 ) for b in range(_snake_case , 999 ) if (a * a + b * b == (1_000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'{solution() = }')
"""simple docstring""" from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def lowercase ( _snake_case : Tuple , _snake_case : str , _snake_case : List[str] = None ) ->List[str]: """simple docstring""" if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release: # old versions of hfh don't url-encode the file path __snake_case : str = quote(__lowerCamelCase ) return hfh.hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' , revision=__lowerCamelCase )
"""simple docstring""" def lowercase ( _snake_case : int = 100 ) ->int: """simple docstring""" __snake_case : str = n * (n + 1) * (2 * n + 1) / 6 __snake_case : Dict = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F'{solution() = }')
"""simple docstring""" from __future__ import annotations from random import random from typing import Generic, TypeVar SCREAMING_SNAKE_CASE : List[Any] = TypeVar("""KT""") SCREAMING_SNAKE_CASE : Optional[int] = TypeVar("""VT""") class _UpperCAmelCase ( Generic[KT, VT] ): '''simple docstring''' def __init__(self , a_ = "root" , a_ = None ): '''simple docstring''' __snake_case : List[Any] = key __snake_case : Tuple = value __snake_case : list[Node[KT, VT]] = [] def __repr__(self ): '''simple docstring''' return f"""Node({self.key}: {self.value})""" @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return len(self.forward ) class _UpperCAmelCase ( Generic[KT, VT] ): '''simple docstring''' def __init__(self , a_ = 0.5 , a_ = 16 ): '''simple docstring''' __snake_case : Node[KT, VT] = Node[KT, VT]() __snake_case : List[str] = 0 __snake_case : List[str] = p __snake_case : str = max_level def __str__(self ): '''simple docstring''' __snake_case : int = list(self ) if len(snake_case__ ) == 0: return f"""SkipList(level={self.level})""" __snake_case : Any = max((len(str(snake_case__ ) ) for item in items) , default=4 ) __snake_case : List[str] = max(snake_case__ , 4 ) + 4 __snake_case : Union[str, Any] = self.head __snake_case : int = [] __snake_case : Any = node.forward.copy() lines.append(f"""[{node.key}]""".ljust(snake_case__ , '''-''' ) + '''* ''' * len(snake_case__ ) ) lines.append(''' ''' * label_size + '''| ''' * len(snake_case__ ) ) while len(node.forward ) != 0: __snake_case : Union[str, Any] = node.forward[0] lines.append( f"""[{node.key}]""".ljust(snake_case__ , '''-''' ) + ''' '''.join(str(n.key ) if n.key == node.key else '''|''' for n in forwards ) ) lines.append(''' ''' * label_size + '''| ''' * len(snake_case__ ) ) __snake_case : Union[str, Any] = node.forward lines.append('''None'''.ljust(snake_case__ ) + '''* ''' * len(snake_case__ ) ) return f"""SkipList(level={self.level})\n""" + "\n".join(snake_case__ ) def __iter__(self ): '''simple docstring''' __snake_case : Optional[int] = self.head while len(node.forward ) != 0: yield node.forward[0].key __snake_case : List[Any] = node.forward[0] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = 1 while random() < self.p and level < self.max_level: level += 1 return level def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : str = [] __snake_case : Optional[int] = self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: __snake_case : Any = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(snake_case__ ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. 
if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Optional[Any] = self._locate_node(snake_case__ ) if node is not None: for i, update_node in enumerate(snake_case__ ): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: __snake_case : Optional[int] = node.forward[i] else: __snake_case : int = update_node.forward[:i] def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : int = self._locate_node(snake_case__ ) if node is not None: __snake_case : Any = value else: __snake_case : Dict = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , snake_case__ ): update_vector.append(self.head ) __snake_case : Optional[int] = level __snake_case : List[str] = Node(snake_case__ , snake_case__ ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(snake_case__ ) else: __snake_case : Union[str, Any] = new_node def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : str = self._locate_node(snake_case__ ) if node is not None: return node.value return None def lowercase ( ) ->List[str]: """simple docstring""" __snake_case : Any = SkipList() skip_list.insert('''Key1''' , 3 ) skip_list.insert('''Key2''' , 12 ) skip_list.insert('''Key3''' , 41 ) skip_list.insert('''Key4''' , -19 ) __snake_case : Optional[int] = skip_list.head __snake_case : Any = {} while node.level != 0: __snake_case : List[str] = node.forward[0] __snake_case : int = node.value assert len(_A ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def lowercase ( ) ->List[str]: """simple docstring""" __snake_case : Any = SkipList() skip_list.insert('''Key1''' , 10 ) skip_list.insert('''Key1''' , 12 ) skip_list.insert('''Key5''' , 7 ) skip_list.insert('''Key7''' , 10 ) skip_list.insert('''Key10''' , 5 ) skip_list.insert('''Key7''' , 7 ) skip_list.insert('''Key5''' , 5 ) skip_list.insert('''Key10''' , 10 ) __snake_case : Union[str, Any] = skip_list.head __snake_case : Optional[Any] = {} while node.level != 0: __snake_case : List[Any] = node.forward[0] __snake_case : Any = node.value if len(_A ) != 4: print() assert len(_A ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def lowercase ( ) ->str: """simple docstring""" __snake_case : List[Any] = SkipList() assert skip_list.find('''Some key''' ) is None def lowercase ( ) ->str: """simple docstring""" __snake_case : Tuple = SkipList() skip_list.insert('''Key2''' , 20 ) assert skip_list.find('''Key2''' ) == 20 skip_list.insert('''Some Key''' , 10 ) skip_list.insert('''Key2''' , 8 ) skip_list.insert('''V''' , 13 ) assert skip_list.find('''Y''' ) is None assert skip_list.find('''Key2''' ) == 8 assert skip_list.find('''Some Key''' ) == 10 assert skip_list.find('''V''' ) == 13 def lowercase ( ) ->Union[str, Any]: """simple docstring""" __snake_case : Optional[Any] = SkipList() skip_list.delete('''Some key''' ) assert len(skip_list.head.forward ) == 0 def lowercase ( ) ->Optional[int]: """simple 
docstring""" __snake_case : Union[str, Any] = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 14 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''V''' ) skip_list.delete('''Key2''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''Key2''' ) is None def lowercase ( ) ->Any: """simple docstring""" __snake_case : List[str] = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 14 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''V''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) == 14 assert skip_list.find('''Key1''' ) == 12 assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''X''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) == 12 assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''Key1''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) is None assert skip_list.find('''Key2''' ) == 15 skip_list.delete('''Key2''' ) assert skip_list.find('''V''' ) is None assert skip_list.find('''X''' ) is None assert skip_list.find('''Key1''' ) is None assert skip_list.find('''Key2''' ) is None def lowercase ( ) ->Any: """simple docstring""" __snake_case : List[str] = SkipList() skip_list.insert('''Key1''' , 12 ) skip_list.insert('''V''' , 13 ) skip_list.insert('''X''' , 142 ) skip_list.insert('''Key2''' , 15 ) skip_list.delete('''X''' ) def traverse_keys(_snake_case : Optional[Any] ): yield node.key for forward_node in node.forward: yield from traverse_keys(_A ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def lowercase ( ) ->List[str]: """simple docstring""" def is_sorted(_snake_case : List[Any] ): return all(next_item >= item for item, next_item in zip(_A , lst[1:] ) ) __snake_case : Optional[Any] = SkipList() for i in range(10 ): skip_list.insert(_A , _A ) assert is_sorted(list(_A ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(_A ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(_A ) ) def lowercase ( ) ->List[Any]: """simple docstring""" for _ in range(100 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def lowercase ( ) ->Union[str, Any]: """simple docstring""" __snake_case : Any = SkipList() skip_list.insert(2 , '''2''' ) skip_list.insert(4 , '''4''' ) skip_list.insert(6 , '''4''' ) skip_list.insert(4 , '''5''' ) skip_list.insert(8 , '''4''' ) skip_list.insert(9 , '''4''' ) skip_list.delete(4 ) print(_A ) if __name__ == "__main__": import doctest doctest.testmod() main()
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__) @dataclass class _UpperCAmelCase ( datasets.BuilderConfig ): '''simple docstring''' lowerCamelCase__ =10000 lowerCamelCase__ =None lowerCamelCase__ =None class _UpperCAmelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' lowerCamelCase__ =ParquetConfig def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) __snake_case : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(a_ , (str, list, tuple) ): __snake_case : Union[str, Any] = data_files if isinstance(a_ , a_ ): __snake_case : Union[str, Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : List[Any] = [dl_manager.iter_files(a_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __snake_case : int = [] for split_name, files in data_files.items(): if isinstance(a_ , a_ ): __snake_case : List[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : int = [dl_manager.iter_files(a_ ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(a_ ): with open(a_ , '''rb''' ) as f: __snake_case : Any = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) ) break splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) ) return splits def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __snake_case : List[Any] = table_cast(a_ , self.info.features.arrow_schema ) return pa_table def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ): with open(a_ , '''rb''' ) as f: __snake_case : int = pq.ParquetFile(a_ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __snake_case : Dict = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"""{file_idx}_{batch_idx}""", self._cast_table(a_ ) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(a_ )}: {e}""" ) raise
"""simple docstring""" import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _UpperCAmelCase ( __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ : Dict =RobertaTokenizer lowerCamelCase__ : Optional[int] =RobertaTokenizerFast lowerCamelCase__ : List[Any] =True lowerCamelCase__ : Tuple ={'cls_token': '<s>'} def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __snake_case : List[str] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] __snake_case : Optional[int] = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) __snake_case : List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __snake_case : Optional[Any] = {'''unk_token''': '''<unk>'''} __snake_case : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__UpperCAmelCase ) ) def SCREAMING_SNAKE_CASE (self , **a_ ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def SCREAMING_SNAKE_CASE (self , **a_ ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Dict = '''lower newer''' __snake_case : int = '''lower newer''' return input_text, output_text def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) __snake_case : List[Any] = '''lower newer''' __snake_case : List[Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __snake_case : Dict = tokenizer.tokenize(__UpperCAmelCase ) # , add_prefix_space=True) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) __snake_case : List[Any] = tokens + [tokenizer.unk_token] __snake_case : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = self.get_tokenizer() self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__UpperCAmelCase ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode('''Hello world! 
cécé herlolip 418''' , add_special_tokens=__UpperCAmelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.tokenizer_class.from_pretrained('''roberta-base''' ) __snake_case : Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=__UpperCAmelCase ) __snake_case : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__UpperCAmelCase ) __snake_case : Tuple = tokenizer.encode( '''sequence builders''' , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase ) __snake_case : List[str] = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase ) __snake_case : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase ) __snake_case : Tuple = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = self.get_tokenizer() __snake_case : List[str] = '''Encode this sequence.''' __snake_case : List[str] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]] # Testing encoder arguments __snake_case : Optional[int] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase ) __snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase ) __snake_case : Dict = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase ) __snake_case : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} ) __snake_case : Optional[Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) __snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase ) # Testing spaces after special tokens __snake_case : Dict = '''<mask>''' tokenizer.add_special_tokens( {'''mask_token''': AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase )} ) # mask token has a left space __snake_case : Union[str, Any] = tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) __snake_case : List[str] = '''Encode <mask> sequence''' __snake_case : Optional[Any] = '''Encode <mask>sequence''' __snake_case : List[Any] = tokenizer.encode(__UpperCAmelCase ) __snake_case : Optional[Any] = encoded.index(__UpperCAmelCase ) __snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) __snake_case : List[str] = tokenizer.encode(__UpperCAmelCase ) __snake_case : str = encoded.index(__UpperCAmelCase ) __snake_case : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , 
**__UpperCAmelCase ) __snake_case : List[str] = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase ) __snake_case : List[str] = '''A, <mask> AllenNLP sentence.''' __snake_case : Union[str, Any] = tokenizer_r.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase ) __snake_case : Union[str, Any] = tokenizer_p.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) __snake_case : Any = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) __snake_case : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( __UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( __UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): __snake_case : int = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase ) __snake_case : List[str] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) __snake_case : List[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __UpperCAmelCase ) self.assertEqual(post_processor_state['''add_prefix_space'''] , __UpperCAmelCase ) self.assertEqual(post_processor_state['''trim_offsets'''] , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __snake_case : Any = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` __snake_case : Any = f"""{text_of_1_token} {text_of_1_token}""" __snake_case : Any = self.rust_tokenizer_class.from_pretrained( __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase ) __snake_case : int = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , ) __snake_case : Tuple = self.rust_tokenizer_class.from_pretrained( __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase ) __snake_case : Tuple = tokenizer_r(__UpperCAmelCase , 
return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , ) __snake_case : List[str] = self.rust_tokenizer_class.from_pretrained( __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase ) __snake_case : int = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , ) __snake_case : Dict = self.rust_tokenizer_class.from_pretrained( __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase ) __snake_case : Dict = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , ) __snake_case : Optional[int] = f""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) __snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase ) __snake_case : int = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ) + 1, 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , ) __snake_case : str = self.rust_tokenizer_class.from_pretrained( __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase ) __snake_case : Dict = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , ) __snake_case : Optional[Any] = self.rust_tokenizer_class.from_pretrained( __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase ) __snake_case : List[Any] = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): __snake_case : Dict = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = '''sshleifer/tiny-gpt2''' __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = '''sgugger/tiny-distilbert-classification''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , ) __snake_case : Optional[Any] = TensorFlowBenchmark(a_ ) __snake_case : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Any = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Union[str, Any] = AutoConfig.from_pretrained(a_ ) __snake_case : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = '''sshleifer/tiny-gpt2''' __snake_case : Optional[Any] = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Dict = TensorFlowBenchmark(a_ , [config] ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = 
TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : int = TensorFlowBenchmark(a_ ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Dict = AutoConfig.from_pretrained(a_ ) __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''patrickvonplaten/t5-tiny-random''' __snake_case : Tuple = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , configs=[config] ) __snake_case : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(a_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(a_ , '''env.csv''' ) , multi_process=a_ , ) __snake_case : Union[str, Any] = TensorFlowBenchmark(a_ ) benchmark.run() self.assertTrue(Path(os.path.join(a_ , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''env.csv''' ) ).exists() ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(a_ ): self.assertTrue(hasattr(a_ , '''sequential''' ) ) self.assertTrue(hasattr(a_ , '''cumulative''' ) ) self.assertTrue(hasattr(a_ , '''current''' ) ) self.assertTrue(hasattr(a_ , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , '''log.txt''' ) , log_print=a_ , 
trace_memory_line_by_line=a_ , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ ) __snake_case : Optional[int] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(a_ , '''log.txt''' ) ).exists() )
"""simple docstring""" from __future__ import annotations from typing import Any def lowercase ( _snake_case : list ) ->Dict: """simple docstring""" if not postfix_notation: return 0 __snake_case : Dict = {"""+""", """-""", """*""", """/"""} __snake_case : list[Any] = [] for token in postfix_notation: if token in operations: __snake_case : Optional[int] = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(snake_case_ ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import logging import os import threading import time try: import warnings except ImportError: SCREAMING_SNAKE_CASE : Tuple = None try: import msvcrt except ImportError: SCREAMING_SNAKE_CASE : List[str] = None try: import fcntl except ImportError: SCREAMING_SNAKE_CASE : Tuple = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: SCREAMING_SNAKE_CASE : List[str] = OSError # Data # ------------------------------------------------ SCREAMING_SNAKE_CASE : List[Any] = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] SCREAMING_SNAKE_CASE : List[Any] = """3.0.12""" SCREAMING_SNAKE_CASE : int = None def lowercase ( ) ->str: """simple docstring""" global _logger __snake_case : Union[str, Any] = _logger or logging.getLogger(__name__ ) return _logger class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[int] = lock_file return None def __str__(self ): '''simple docstring''' __snake_case : Tuple = f"""The file lock '{self.lock_file}' could not be acquired.""" return temp class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[Any] = lock return None def __enter__(self ): '''simple docstring''' return self.lock def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.lock.release() return None class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : List[Any] = max_filename_length if max_filename_length is not None else 2_55 # Hash the filename if it's too long __snake_case : Dict = self.hash_filename_if_too_long(a_ , a_ ) # The path to the lock file. __snake_case : str = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __snake_case : Dict = None # The default timeout value. __snake_case : List[Any] = timeout # We use this lock primarily for the lock counter. __snake_case : Tuple = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __snake_case : Optional[Any] = 0 return None @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._timeout @timeout.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Dict = float(a_ ) return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file_fd is not None def SCREAMING_SNAKE_CASE (self , a_=None , a_=0.05 ): '''simple docstring''' if timeout is None: __snake_case : List[str] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __snake_case : Optional[int] = id(self ) __snake_case : str = self._lock_file __snake_case : Optional[int] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(a_ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __snake_case : Optional[int] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def SCREAMING_SNAKE_CASE (self , a_=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __snake_case : Tuple = id(self ) __snake_case : str = self._lock_file logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __snake_case : Dict = 0 logger().debug(f"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__(self ): '''simple docstring''' self.acquire() return self def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.release() return None def __del__(self ): '''simple docstring''' self.release(force=a_ ) return None def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : Any = os.path.basename(a_ ) if len(a_ ) > max_length and max_length > 0: __snake_case : List[Any] = os.path.dirname(a_ ) __snake_case : Any = str(hash(a_ ) ) __snake_case : List[Any] = filename[: max_length - len(a_ ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(a_ , a_ ) else: return path class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) __snake_case : List[str] = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __snake_case : Any = os.open(self._lock_file , a_ ) except OSError: pass else: try: msvcrt.locking(a_ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(a_ ) else: __snake_case : Dict = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Dict = None msvcrt.locking(a_ , msvcrt.LK_UNLCK , 1 ) os.close(a_ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : Optional[Any] = os.statvfs(os.path.dirname(a_ ) ).f_namemax super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC __snake_case : List[str] = os.open(self._lock_file , a_ ) try: fcntl.flock(a_ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(a_ ) else: __snake_case : Optional[int] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Tuple = None fcntl.flock(a_ , fcntl.LOCK_UN ) os.close(a_ ) return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __snake_case : Tuple = os.open(self._lock_file , a_ ) except OSError: pass else: __snake_case : List[Any] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' os.close(self._lock_file_fd ) __snake_case : int = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None SCREAMING_SNAKE_CASE : Dict = None if msvcrt: SCREAMING_SNAKE_CASE : List[Any] = WindowsFileLock elif fcntl: SCREAMING_SNAKE_CASE : List[str] = UnixFileLock else: SCREAMING_SNAKE_CASE : str = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
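The platform-specific subclasses are normally used through the FileLock alias and its context-manager protocol; the lock is reentrant thanks to the counter in BaseFileLock. A minimal sketch with illustrative file names:

# Hedged usage sketch: FileLock resolves above to the class matching the platform.
lock = FileLock("shared_resource.txt.lock", timeout=5)
with lock:  # waits up to 5 seconds, then raises Timeout
    with open("shared_resource.txt", "a") as f:
        f.write("written while holding the lock\n")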
def is_unique(input_str: str) -> bool:
    """Determine whether all characters in a string are distinct, using a bitmap."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
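Because the bitmap is built with pow(2, ord(ch)), it is an arbitrary-precision integer, so the check works for any Unicode code point, not just ASCII. A small sanity check, assuming is_unique is in scope:

# Hedged usage sketch.
assert is_unique("abcde") is True
assert is_unique("abcdea") is False  # 'a' repeats
assert is_unique("") is True         # vacuously unique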
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=24 , a_=2 , a_=6 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=None , a_=10_00 , ): '''simple docstring''' __snake_case : Any = parent __snake_case : int = batch_size __snake_case : Dict = seq_length __snake_case : List[str] = is_training __snake_case : List[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : Union[str, Any] = use_labels __snake_case : str = vocab_size __snake_case : int = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : int = num_attention_heads __snake_case : str = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : int = hidden_dropout_prob __snake_case : Union[str, Any] = attention_probs_dropout_prob __snake_case : List[Any] = max_position_embeddings __snake_case : Any = type_vocab_size __snake_case : Dict = type_sequence_label_size __snake_case : Optional[Any] = initializer_range __snake_case : Union[str, Any] = num_labels __snake_case : Any = scope __snake_case : Any = range_bbox def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : List[str] = bbox[i, j, 3] __snake_case : Any = bbox[i, j, 1] __snake_case : Tuple = t if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : List[str] = bbox[i, j, 2] __snake_case : Union[str, Any] = bbox[i, j, 0] __snake_case : Dict = t __snake_case : Optional[int] = None if self.use_input_mask: __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __snake_case : Dict = None if self.use_token_type_ids: __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : List[str] = None __snake_case : Union[str, Any] = None if self.use_labels: __snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : Any = model(a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ ) __snake_case : str = model(a_ , bbox=a_ , token_type_ids=a_ ) __snake_case : List[str] = model(a_ , bbox=a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[int] = self.num_labels __snake_case : List[str] = LiltForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Tuple = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[Any] = LiltForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Dict = config_and_inputs __snake_case : Any = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ =( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =False lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' return True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModelTester(self ) __snake_case : Optional[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Dict = type 
self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = LiltModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @require_torch @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a_ ) __snake_case : Dict = torch.tensor([[1, 2]] , device=a_ ) __snake_case : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a_ ) # forward pass with torch.no_grad(): __snake_case : Union[str, Any] = model(input_ids=a_ , bbox=a_ ) __snake_case : Union[str, Any] = torch.Size([1, 2, 7_68] ) __snake_case : str = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=a_ , ) self.assertTrue(outputs.last_hidden_state.shape , a_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a_ , atol=1E-3 ) )
24
0
"""simple docstring""" def lowercase ( _snake_case : int = 4_000_000 ) ->List[Any]: """simple docstring""" __snake_case : Optional[Any] = [0, 1] __snake_case : Union[str, Any] = 0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1] ) if fib[i + 2] > n: break i += 1 __snake_case : int = 0 for j in range(len(A__ ) - 1 ): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(F'{solution() = }')
356
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ): '''simple docstring''' __snake_case : List[Any] = parent __snake_case : List[Any] = batch_size __snake_case : str = seq_length __snake_case : Any = is_training __snake_case : Any = use_input_mask __snake_case : str = use_token_type_ids __snake_case : Dict = use_labels __snake_case : int = vocab_size __snake_case : Union[str, Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : str = num_attention_heads __snake_case : Optional[int] = intermediate_size __snake_case : str = hidden_act __snake_case : Union[str, Any] = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : str = max_position_embeddings __snake_case : Dict = type_vocab_size __snake_case : List[Any] = type_sequence_label_size __snake_case : Union[str, Any] = initializer_range __snake_case : str = num_labels __snake_case : Dict = num_choices __snake_case : Optional[int] = scope def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Dict = None if self.use_input_mask: __snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Tuple = None __snake_case : List[str] = None __snake_case : Dict = None if self.use_labels: __snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[str] = DistilBertModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model(a_ , a_ ) __snake_case : List[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple 
docstring''' __snake_case : Optional[Any] = DistilBertForMaskedLM(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Tuple = DistilBertForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : Optional[Any] = model( a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Any = self.num_labels __snake_case : Optional[int] = DistilBertForSequenceClassification(a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = self.num_labels __snake_case : Optional[int] = DistilBertForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Dict = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = self.num_choices __snake_case : Any = DistilBertForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : Optional[int] = model( a_ , attention_mask=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs __snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCamelCase__ =( { 'feature-extraction': DistilBertModel, 'fill-mask': DistilBertForMaskedLM, 'question-answering': DistilBertForQuestionAnswering, 'text-classification': DistilBertForSequenceClassification, 'token-classification': DistilBertForTokenClassification, 'zero-shot': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = DistilBertModelTester(self ) __snake_case : List[str] = ConfigTester(self , config_class=a_ , dim=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def 
SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Tuple = DistilBertModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return __snake_case : List[str] = True __snake_case : Tuple = model_class(config=a_ ) __snake_case : Any = self._prepare_for_class(a_ , a_ ) __snake_case : Dict = torch.jit.trace( a_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a_ , os.path.join(a_ , '''traced_model.pt''' ) ) __snake_case : int = torch.jit.load(os.path.join(a_ , '''traced_model.pt''' ) , map_location=a_ ) loaded(inputs_dict['''input_ids'''].to(a_ ) , inputs_dict['''attention_mask'''].to(a_ ) ) @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __snake_case : List[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __snake_case : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __snake_case : List[Any] = model(a_ , attention_mask=a_ )[0] __snake_case : Tuple = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , a_ ) __snake_case : Optional[int] = torch.tensor( [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
24
0
"""simple docstring""" from __future__ import annotations def lowercase ( _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Any ) ->Union[str, Any]: """simple docstring""" __snake_case : Optional[int] = [] __snake_case , __snake_case : List[Any] = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) __snake_case : List[Any] = result + left + right return input_list def lowercase ( _snake_case : List[Any] ) ->Union[str, Any]: """simple docstring""" if len(_snake_case ) <= 1: return input_list __snake_case : Union[str, Any] = list(_snake_case ) # iteration for two-way merging __snake_case : int = 2 while p <= len(_snake_case ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(_snake_case ) , _snake_case ): __snake_case : List[Any] = i __snake_case : Optional[int] = i + p - 1 __snake_case : Tuple = (low + high + 1) // 2 __snake_case : List[str] = merge(_snake_case , _snake_case , _snake_case , _snake_case ) # final merge of last two parts if p * 2 >= len(_snake_case ): __snake_case : List[str] = i __snake_case : List[str] = merge(_snake_case , 0 , _snake_case , len(_snake_case ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": SCREAMING_SNAKE_CASE : Union[str, Any] = input("""Enter numbers separated by a comma:\n""").strip() if user_input == "": SCREAMING_SNAKE_CASE : Tuple = [] else: SCREAMING_SNAKE_CASE : str = [int(item.strip()) for item in user_input.split(""",""")] print(iter_merge_sort(unsorted))
357
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase ( _snake_case : str , _snake_case : str , _snake_case : str ) ->List[Any]: """simple docstring""" def get_masked_lm_array(_snake_case : str ): __snake_case : int = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : str = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Any = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_array(_snake_case : str ): __snake_case : List[str] = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Union[str, Any] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_layer_array(_snake_case : int , _snake_case : str ): __snake_case : str = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Optional[int] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[Any] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_attention_layer_array(_snake_case : int , _snake_case : str , _snake_case : str ): __snake_case : Any = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Dict = tf.train.load_variable(_snake_case , _snake_case ) __snake_case : int = array.reshape(_snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) print(f"""Loading model based on config from {config_path}...""" ) __snake_case : Optional[Any] = BertConfig.from_json_file(_snake_case ) __snake_case : Dict = BertForMaskedLM(_snake_case ) # Layers for layer_index in range(0 , config.num_hidden_layers ): __snake_case : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention __snake_case : BertSelfAttention = layer.attention.self __snake_case : int = get_encoder_attention_layer_array( _snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape ) __snake_case : List[Any] = get_encoder_attention_layer_array( _snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape ) __snake_case : Union[str, Any] = get_encoder_attention_layer_array( _snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape ) # Self-attention Output __snake_case : BertSelfOutput = layer.attention.output __snake_case : Dict = get_encoder_attention_layer_array( _snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape ) __snake_case : str = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/gamma''' ) __snake_case : Any = 
get_encoder_layer_array(_snake_case , '''_attention_layer_norm/beta''' ) # Intermediate __snake_case : BertIntermediate = layer.intermediate __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/kernel''' ) __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/bias''' ) # Output __snake_case : BertOutput = layer.output __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_dense/kernel''' ) __snake_case : Dict = get_encoder_layer_array(_snake_case , '''_output_dense/bias''' ) __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/gamma''' ) __snake_case : Union[str, Any] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/beta''' ) # Embeddings __snake_case : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' ) __snake_case : str = get_encoder_array('''_type_embedding_layer/embeddings''' ) __snake_case : int = get_encoder_array('''_embedding_norm_layer/gamma''' ) __snake_case : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' ) # LM Head __snake_case : Optional[Any] = model.cls.predictions.transform __snake_case : Dict = get_masked_lm_array('''dense/kernel''' ) __snake_case : Union[str, Any] = get_masked_lm_array('''dense/bias''' ) __snake_case : str = get_masked_lm_array('''layer_norm/gamma''' ) __snake_case : Tuple = get_masked_lm_array('''layer_norm/beta''' ) __snake_case : Tuple = get_masked_lm_array('''embedding_table''' ) # Pooling __snake_case : Optional[Any] = BertPooler(config=_snake_case ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/kernel''' ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/bias''' ) # Export final model model.save_pretrained(_snake_case ) # Integration test - should load without any errors ;) __snake_case : Dict = BertForMaskedLM.from_pretrained(_snake_case ) print(new_model.eval() ) print('''Model conversion was done sucessfully!''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() parser.add_argument( """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path.""" ) parser.add_argument( """--bert_config_file""", type=str, required=True, help="""The config json file corresponding to the BERT model. This specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", type=str, required=True, help="""Path to the output PyTorch model.""", ) SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
24
0
"""simple docstring""" def lowercase ( _snake_case : float , _snake_case : float ) ->float: """simple docstring""" if density <= 0: raise ValueError('''Impossible fluid density''' ) if bulk_modulus <= 0: raise ValueError('''Impossible bulk modulus''' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
358
"""simple docstring""" import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_ , a_ = None , a_ = None , a_ = False , **a_ , ): '''simple docstring''' super().__init__(features=a_ , cache_dir=a_ , keep_in_memory=a_ , **a_ ) __snake_case : Union[str, Any] = Sql( cache_dir=a_ , features=a_ , sql=a_ , con=a_ , **a_ , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = None __snake_case : Dict = None __snake_case : Dict = None __snake_case : List[str] = None self.builder.download_and_prepare( download_config=a_ , download_mode=a_ , verification_mode=a_ , base_path=a_ , ) # Build dataset for splits __snake_case : Any = self.builder.as_dataset( split='''train''' , verification_mode=a_ , in_memory=self.keep_in_memory ) return dataset class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_ , a_ , a_ = None , a_ = None , **a_ , ): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" ) __snake_case : List[str] = dataset __snake_case : Tuple = name __snake_case : Optional[int] = con __snake_case : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __snake_case : Dict = num_proc __snake_case : Dict = to_sql_kwargs def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.to_sql_kwargs.pop('''sql''' , a_ ) __snake_case : Union[str, Any] = self.to_sql_kwargs.pop('''con''' , a_ ) __snake_case : Any = self.to_sql_kwargs.pop('''index''' , a_ ) __snake_case : Optional[Any] = self._write(index=a_ , **self.to_sql_kwargs ) return written def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case , __snake_case , __snake_case : Optional[Any] = args __snake_case : List[Any] = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs __snake_case : Dict = query_table( table=self.dataset.data , key=slice(a_ , offset + self.batch_size ) , indices=self.dataset._indices , ) __snake_case : Tuple = batch.to_pandas() __snake_case : str = df.to_sql(self.name , self.con , index=a_ , **a_ ) return num_rows or len(a_ ) def SCREAMING_SNAKE_CASE (self , a_ , **a_ ): '''simple docstring''' __snake_case : int = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: __snake_case , __snake_case : Union[str, Any] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a_ , a_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += num_rows return written
24
0
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def lowercase ( _snake_case : Union[str, Any] ) ->Dict: """simple docstring""" if "img_encoder.pos_embed" in name: __snake_case : List[str] = name.replace('''img_encoder.pos_embed''' , '''vision_model.embeddings.position_embeddings''' ) if "img_encoder.patch_embed.proj" in name: __snake_case : List[str] = name.replace('''img_encoder.patch_embed.proj''' , '''vision_model.embeddings.patch_embeddings.projection''' ) if "img_encoder.patch_embed.norm" in name: __snake_case : str = name.replace('''img_encoder.patch_embed.norm''' , '''vision_model.embeddings.layernorm''' ) if "img_encoder.layers" in name: __snake_case : Optional[int] = name.replace('''img_encoder.layers''' , '''vision_model.encoder.stages''' ) if "blocks" in name and "res" not in name: __snake_case : List[str] = name.replace('''blocks''' , '''layers''' ) if "attn" in name and "pre_assign" not in name: __snake_case : Union[str, Any] = name.replace('''attn''' , '''self_attn''' ) if "proj" in name and "self_attn" in name and "text" not in name: __snake_case : int = name.replace('''proj''' , '''out_proj''' ) if "pre_assign_attn.attn.proj" in name: __snake_case : Union[str, Any] = name.replace('''pre_assign_attn.attn.proj''' , '''pre_assign_attn.attn.out_proj''' ) if "norm1" in name: __snake_case : Union[str, Any] = name.replace('''norm1''' , '''layer_norm1''' ) if "norm2" in name and "pre_assign" not in name: __snake_case : Optional[int] = name.replace('''norm2''' , '''layer_norm2''' ) if "img_encoder.norm" in name: __snake_case : int = name.replace('''img_encoder.norm''' , '''vision_model.layernorm''' ) # text encoder if "text_encoder.token_embedding" in name: __snake_case : Tuple = name.replace('''text_encoder.token_embedding''' , '''text_model.embeddings.token_embedding''' ) if "text_encoder.positional_embedding" in name: __snake_case : Optional[Any] = name.replace('''text_encoder.positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "text_encoder.transformer.resblocks." in name: __snake_case : Optional[int] = name.replace('''text_encoder.transformer.resblocks.''' , '''text_model.encoder.layers.''' ) if "ln_1" in name: __snake_case : List[Any] = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: __snake_case : Optional[Any] = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in name: __snake_case : List[str] = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: __snake_case : Dict = name.replace('''c_proj''' , '''fc2''' ) if "text_encoder" in name: __snake_case : List[Any] = name.replace('''text_encoder''' , '''text_model''' ) if "ln_final" in name: __snake_case : Optional[int] = name.replace('''ln_final''' , '''final_layer_norm''' ) # projection layers if "img_projector.linear_hidden." in name: __snake_case : Optional[int] = name.replace('''img_projector.linear_hidden.''' , '''visual_projection.''' ) if "img_projector.linear_out." 
in name: __snake_case : str = name.replace('''img_projector.linear_out.''' , '''visual_projection.3.''' ) if "text_projector.linear_hidden" in name: __snake_case : List[Any] = name.replace('''text_projector.linear_hidden''' , '''text_projection''' ) if "text_projector.linear_out" in name: __snake_case : Optional[Any] = name.replace('''text_projector.linear_out''' , '''text_projection.3''' ) return name def lowercase ( _snake_case : List[str] , _snake_case : Union[str, Any] ) ->List[str]: """simple docstring""" for key in orig_state_dict.copy().keys(): __snake_case : Union[str, Any] = orig_state_dict.pop(lowerCAmelCase__ ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors __snake_case : Optional[int] = key.split('''.''' ) __snake_case , __snake_case : Optional[int] = int(key_split[2] ), int(key_split[4] ) __snake_case : List[str] = config.vision_config.hidden_size if "weight" in key: __snake_case : List[Any] = val[:dim, :] __snake_case : Tuple = val[dim : dim * 2, :] __snake_case : int = val[-dim:, :] else: __snake_case : Union[str, Any] = val[:dim] __snake_case : Optional[int] = val[dim : dim * 2] __snake_case : List[Any] = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors __snake_case : Union[str, Any] = key.split('''.''' ) __snake_case : List[str] = int(key_split[3] ) __snake_case : Union[str, Any] = config.text_config.hidden_size if "weight" in key: __snake_case : Any = val[:dim, :] __snake_case : Optional[int] = val[ dim : dim * 2, : ] __snake_case : Optional[int] = val[-dim:, :] else: __snake_case : str = val[:dim] __snake_case : int = val[dim : dim * 2] __snake_case : Tuple = val[-dim:] else: __snake_case : Dict = rename_key(lowerCAmelCase__ ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): __snake_case : int = val.squeeze_() else: __snake_case : List[Any] = val return orig_state_dict def lowercase ( ) ->Union[str, Any]: """simple docstring""" __snake_case : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __snake_case : List[Any] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ) return im @torch.no_grad() def lowercase ( _snake_case : Union[str, Any] , _snake_case : List[str] , _snake_case : int="groupvit-gcc-yfcc" , _snake_case : List[Any]=False ) ->str: """simple docstring""" __snake_case : Optional[Any] = GroupViTConfig() __snake_case : List[str] = GroupViTModel(lowerCAmelCase__ ).eval() __snake_case : Optional[int] = torch.load(lowerCAmelCase__ , map_location='''cpu''' )['''model'''] __snake_case : List[str] = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ ) __snake_case , __snake_case : int = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCAmelCase__ ) == 0) # verify result __snake_case : str = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) __snake_case : Optional[int] = prepare_img() __snake_case : Any = processor(text=['''a photo of a cat''', '''a photo of a dog'''] , images=lowerCAmelCase__ , 
padding=lowerCAmelCase__ , return_tensors='''pt''' ) with torch.no_grad(): __snake_case : Any = model(**lowerCAmelCase__ ) if model_name == "groupvit-gcc-yfcc": __snake_case : Any = torch.tensor([[13.3523, 6.3629]] ) elif model_name == "groupvit-gcc-redcaps": __snake_case : Dict = torch.tensor([[16.1873, 8.6230]] ) else: raise ValueError(f"""Model name {model_name} not supported.""" ) assert torch.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1e-3 ) processor.save_pretrained(lowerCAmelCase__ ) model.save_pretrained(lowerCAmelCase__ ) print('''Successfully saved processor and model to''' , lowerCAmelCase__ ) if push_to_hub: print('''Pushing to the hub...''' ) processor.push_to_hub(lowerCAmelCase__ , organization='''nielsr''' ) model.push_to_hub(lowerCAmelCase__ , organization='''nielsr''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""") parser.add_argument( """--model_name""", default="""groupvit-gccy-fcc""", type=str, help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""", ) SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
359
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[int] = { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""", } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='lxmert' lowerCamelCase__ ={} def __init__(self , a_=3_05_22 , a_=7_68 , a_=12 , a_=95_00 , a_=16_00 , a_=4_00 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=9 , a_=5 , a_=5 , a_=20_48 , a_=4 , a_=6.67 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = vocab_size __snake_case : List[str] = hidden_size __snake_case : List[Any] = num_attention_heads __snake_case : int = hidden_act __snake_case : int = intermediate_size __snake_case : Any = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : Tuple = max_position_embeddings __snake_case : List[str] = type_vocab_size __snake_case : str = initializer_range __snake_case : Tuple = layer_norm_eps __snake_case : List[Any] = num_qa_labels __snake_case : int = num_object_labels __snake_case : Optional[Any] = num_attr_labels __snake_case : Union[str, Any] = l_layers __snake_case : Optional[int] = x_layers __snake_case : Optional[int] = r_layers __snake_case : Tuple = visual_feat_dim __snake_case : Optional[int] = visual_pos_dim __snake_case : Dict = visual_loss_normalizer __snake_case : str = task_matched __snake_case : Optional[Any] = task_mask_lm __snake_case : List[str] = task_obj_predict __snake_case : Optional[Any] = task_qa __snake_case : Any = visual_obj_loss __snake_case : int = visual_attr_loss __snake_case : List[Any] = visual_feat_loss __snake_case : Optional[Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers} super().__init__(**a_ )
24
0
"""simple docstring""" from collections import defaultdict def lowercase ( _snake_case : Union[str, Any] , _snake_case : str ) ->bool: """simple docstring""" __snake_case : Dict = first_str.lower().strip() __snake_case : List[Any] = second_str.lower().strip() # Remove whitespace __snake_case : Dict = first_str.replace(''' ''' , '''''' ) __snake_case : Tuple = second_str.replace(''' ''' , '''''' ) # Strings of different lengths are not anagrams if len(a__ ) != len(a__ ): return False # Default values for count should be 0 __snake_case : Union[str, Any] = defaultdict(a__ ) # For each character in input strings, # increment count in the corresponding for i in range(len(a__ ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() SCREAMING_SNAKE_CASE : Tuple = input("""Enter the first string """).strip() SCREAMING_SNAKE_CASE : str = input("""Enter the second string """).strip() SCREAMING_SNAKE_CASE : int = check_anagrams(input_a, input_b) print(F'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
360
"""simple docstring""" def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" __snake_case : Tuple = len(_snake_case ) __snake_case : str = sum(_snake_case ) __snake_case : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __snake_case : Optional[Any] = True for i in range(1 , s + 1 ): __snake_case : int = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __snake_case : Union[str, Any] = dp[i][j - 1] if arr[i - 1] <= j: __snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __snake_case : List[str] = s - 2 * j break return diff
24
0
"""simple docstring""" import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[str] = { """kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""", } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='align_text_model' def __init__(self , a_=3_05_22 , a_=7_68 , a_=12 , a_=12 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=0 , a_="absolute" , a_=True , **a_ , ): '''simple docstring''' super().__init__(**a_ ) __snake_case : List[Any] = vocab_size __snake_case : List[str] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : Tuple = num_attention_heads __snake_case : Union[str, Any] = hidden_act __snake_case : Union[str, Any] = intermediate_size __snake_case : Dict = hidden_dropout_prob __snake_case : Optional[int] = attention_probs_dropout_prob __snake_case : List[Any] = max_position_embeddings __snake_case : int = type_vocab_size __snake_case : Any = initializer_range __snake_case : Tuple = layer_norm_eps __snake_case : Any = position_embedding_type __snake_case : Any = use_cache __snake_case : str = pad_token_id @classmethod def SCREAMING_SNAKE_CASE (cls , a_ , **a_ ): '''simple docstring''' cls._set_token_in_kwargs(a_ ) __snake_case , __snake_case : Tuple = cls.get_config_dict(a_ , **a_ ) # get the text config dict if we are loading from AlignConfig if config_dict.get('''model_type''' ) == "align": __snake_case : Optional[int] = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(a_ , **a_ ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='align_vision_model' def __init__(self , a_ = 3 , a_ = 6_00 , a_ = 2.0 , a_ = 3.1 , a_ = 8 , a_ = [3, 3, 5, 3, 5, 5, 3] , a_ = [32, 16, 24, 40, 80, 1_12, 1_92] , a_ = [16, 24, 40, 80, 1_12, 1_92, 3_20] , a_ = [] , a_ = [1, 2, 2, 2, 1, 2, 1] , a_ = [1, 2, 2, 3, 3, 4, 1] , a_ = [1, 6, 6, 6, 6, 6, 6] , a_ = 0.25 , a_ = "swish" , a_ = 25_60 , a_ = "mean" , a_ = 0.02 , a_ = 0.001 , a_ = 0.99 , a_ = 0.2 , **a_ , ): '''simple docstring''' super().__init__(**a_ ) __snake_case : Dict = num_channels __snake_case : Tuple = image_size __snake_case : Dict = width_coefficient __snake_case : Tuple = depth_coefficient __snake_case : Optional[Any] = depth_divisor __snake_case : List[Any] = kernel_sizes __snake_case : int = in_channels __snake_case : List[Any] = out_channels __snake_case : List[str] = depthwise_padding __snake_case : List[str] = strides __snake_case : Dict = num_block_repeats __snake_case : Union[str, Any] = expand_ratios __snake_case : Optional[Any] = squeeze_expansion_ratio __snake_case : Optional[int] = hidden_act __snake_case : List[Any] = hidden_dim __snake_case : Union[str, Any] = pooling_type __snake_case : int = initializer_range __snake_case : Tuple = batch_norm_eps __snake_case : Tuple = batch_norm_momentum __snake_case : List[str] = drop_connect_rate __snake_case : List[Any] = sum(a_ ) * 4 @classmethod def SCREAMING_SNAKE_CASE (cls , a_ , **a_ ): '''simple docstring''' cls._set_token_in_kwargs(a_ ) __snake_case , __snake_case : Optional[Any] = cls.get_config_dict(a_ , **a_ ) # get the vision config dict if we are loading from AlignConfig if config_dict.get('''model_type''' ) == "align": __snake_case : Optional[int] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(a_ , **a_ ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='align' lowerCamelCase__ =True def __init__(self , a_=None , a_=None , a_=6_40 , a_=1.0 , a_=0.02 , **a_ , ): '''simple docstring''' super().__init__(**a_ ) if text_config is None: __snake_case : List[str] = {} logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' ) if vision_config is None: __snake_case : Dict = {} logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' ) __snake_case : List[Any] = AlignTextConfig(**a_ ) __snake_case : Dict = AlignVisionConfig(**a_ ) __snake_case : str = projection_dim __snake_case : List[Any] = temperature_init_value __snake_case : Any = initializer_range @classmethod def SCREAMING_SNAKE_CASE (cls , a_ , a_ , **a_ ): '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = copy.deepcopy(self.__dict__ ) __snake_case : Any = self.text_config.to_dict() __snake_case : Optional[int] = self.vision_config.to_dict() __snake_case : Union[str, Any] = self.__class__.model_type return output
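# --- Hedged usage sketch (appended example, not part of the original file) ---
# Composing the combined config from its parts; the public transformers names
# (AlignConfig, AlignTextConfig, AlignVisionConfig) and the classmethod
# from_text_vision_configs are assumed to correspond to the classes above.
if __name__ == "__main__":
    from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

    text_cfg = AlignTextConfig(vocab_size=30522)
    vision_cfg = AlignVisionConfig(image_size=600)
    combined = AlignConfig.from_text_vision_configs(text_cfg, vision_cfg)
    print(combined.to_dict()["text_config"]["vocab_size"])  # 30522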
361
"""simple docstring""" from collections.abc import Callable def lowercase ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) ->float: """simple docstring""" __snake_case : float = a __snake_case : float = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('''could not find root in given interval.''' ) else: __snake_case : float = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: __snake_case : List[str] = mid else: __snake_case : str = mid __snake_case : str = start + (end - start) / 2.0 return mid def lowercase ( _snake_case : float ) ->float: """simple docstring""" return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1000)) import doctest doctest.testmod()
24
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available SCREAMING_SNAKE_CASE : Tuple = { """configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""], """tokenization_xlm""": ["""XLMTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Any = [ """XLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLMForMultipleChoice""", """XLMForQuestionAnswering""", """XLMForQuestionAnsweringSimple""", """XLMForSequenceClassification""", """XLMForTokenClassification""", """XLMModel""", """XLMPreTrainedModel""", """XLMWithLMHeadModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : List[str] = [ """TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLMForMultipleChoice""", """TFXLMForQuestionAnsweringSimple""", """TFXLMForSequenceClassification""", """TFXLMForTokenClassification""", """TFXLMMainLayer""", """TFXLMModel""", """TFXLMPreTrainedModel""", """TFXLMWithLMHeadModel""", ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
362
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[str] = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : str = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
24
0
"""simple docstring""" from __future__ import annotations from scipy.special import comb # type: ignore class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[Any] = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. __snake_case : Tuple = len(_SCREAMING_SNAKE_CASE ) - 1 def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' assert 0 <= t <= 1, "Time t must be between 0 and 1." __snake_case : list[float] = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree , _SCREAMING_SNAKE_CASE ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(_SCREAMING_SNAKE_CASE ) , 5 ) == 1 return output_values def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' assert 0 <= t <= 1, "Time t must be between 0 and 1." __snake_case : Dict = self.basis_function(_SCREAMING_SNAKE_CASE ) __snake_case : Optional[int] = 0.0 __snake_case : Optional[int] = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def SCREAMING_SNAKE_CASE (self , a_ = 0.01 ): '''simple docstring''' from matplotlib import pyplot as plt # type: ignore __snake_case : list[float] = [] # x coordinates of points to plot __snake_case : list[float] = [] # y coordinates of points to plot __snake_case : List[Any] = 0.0 while t <= 1: __snake_case : int = self.bezier_curve_function(_SCREAMING_SNAKE_CASE ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size __snake_case : Tuple = [i[0] for i in self.list_of_points] __snake_case : Any = [i[1] for i in self.list_of_points] plt.plot( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , ) plt.scatter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , color='''red''' , label='''Control Points''' ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
363
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =['image_processor', 'tokenizer'] lowerCamelCase__ ='CLIPImageProcessor' lowerCamelCase__ =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__(self , a_=None , a_=None , **a_ ): '''simple docstring''' __snake_case : Any = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a_ , ) __snake_case : Union[str, Any] = kwargs.pop('''feature_extractor''' ) __snake_case : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a_ , a_ ) def __call__(self , a_=None , a_=None , a_=None , **a_ ): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __snake_case : Dict = self.tokenizer(a_ , return_tensors=a_ , **a_ ) if images is not None: __snake_case : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None and images is not None: __snake_case : List[str] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.batch_decode(*a_ , **a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.decode(*a_ , **a_ ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.tokenizer.model_input_names __snake_case : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
24
0
"""simple docstring""" import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 SCREAMING_SNAKE_CASE : Dict = get_tests_dir("""fixtures/dummy-config.json""") class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = 0 def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = AutoConfig.from_pretrained('''bert-base-uncased''' ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = AutoConfig.for_model('''roberta''' ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. 
__snake_case : int = os.path.join(UpperCAmelCase_ , '''fake-roberta''' ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) with open(os.path.join(UpperCAmelCase_ , '''config.json''' ) , '''w''' ) as f: f.write(json.dumps({} ) ) __snake_case : Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertEqual(type(UpperCAmelCase_ ) , UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' try: AutoConfig.register('''custom''' , UpperCAmelCase_ ) # Wrong model type will raise an error with self.assertRaises(UpperCAmelCase_ ): AutoConfig.register('''model''' , UpperCAmelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCAmelCase_ ): AutoConfig.register('''bert''' , UpperCAmelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API __snake_case : List[str] = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(UpperCAmelCase_ ) __snake_case : Dict = AutoConfig.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' with self.assertRaisesRegex( UpperCAmelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ): __snake_case : Union[str, Any] = AutoConfig.from_pretrained('''bert-base''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' with self.assertRaisesRegex( UpperCAmelCase_ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): __snake_case : int = AutoConfig.from_pretrained(UpperCAmelCase_ , revision='''aaaaaa''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' with self.assertRaisesRegex( UpperCAmelCase_ , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ): __snake_case : Tuple = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' with self.assertRaises(UpperCAmelCase_ ): __snake_case : int = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(UpperCAmelCase_ ): __snake_case : Any = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=UpperCAmelCase_ ) __snake_case : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=UpperCAmelCase_ ) self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' ) # Test config can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(UpperCAmelCase_ ) __snake_case : Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase_ , trust_remote_code=UpperCAmelCase_ ) self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' class _UpperCAmelCase ( snake_case__ ): '''simple docstring''' lowerCamelCase__ ='new-model' try: AutoConfig.register('''new-model''' , UpperCAmelCase_ ) # If remote code is not set, the default is to use local __snake_case : Union[str, Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' ) self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' ) # If remote code is disabled, we load the local one. 
__snake_case : int = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=UpperCAmelCase_ ) self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' ) # If remote is enabled, we load from the Hub __snake_case : Tuple = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=UpperCAmelCase_ ) self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
364
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE : List[Any] = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""", """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""", }, } SCREAMING_SNAKE_CASE : Tuple = { """facebook/mbart-large-en-ro""": 1024, """facebook/mbart-large-cc25""": 1024, } # fmt: off SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =VOCAB_FILES_NAMES lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ =['input_ids', 'attention_mask'] lowerCamelCase__ =MBartTokenizer lowerCamelCase__ =[] lowerCamelCase__ =[] def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token super().__init__( vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , ) __snake_case : Tuple = vocab_file __snake_case : Optional[Any] = False if not self.vocab_file else True __snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) __snake_case : Optional[int] = { lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX''' __snake_case : Any = self.convert_tokens_to_ids(self._src_lang ) __snake_case : Dict = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' __snake_case : Tuple = [self.sep_token_id] __snake_case : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __snake_case : Optional[int] = src_lang __snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ ) __snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ ) __snake_case : int = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ): '''simple docstring''' __snake_case : int = src_lang __snake_case : List[Any] = tgt_lang return super().prepare_seqaseq_batch(a_ , a_ , **a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : List[Any] = [] __snake_case : Any = [self.eos_token_id, self.cur_lang_code] __snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : Optional[Any] = [] __snake_case : Dict = [self.eos_token_id, self.cur_lang_code] __snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + 
suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(a_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return __snake_case : Optional[Any] = os.path.join( a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file , a_ ) return (out_vocab_file,)
24
0
"""simple docstring""" import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( lowerCamelCase_, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =ConsistencyModelPipeline lowerCamelCase__ =UNCONDITIONAL_IMAGE_GENERATION_PARAMS lowerCamelCase__ =UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt lowerCamelCase__ =frozenset( [ 'num_inference_steps', 'generator', 'latents', 'output_type', 'return_dict', 'callback', 'callback_steps', ] ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet''' , ) return unet @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , ) return unet def SCREAMING_SNAKE_CASE (self , a_=False ): '''simple docstring''' if class_cond: __snake_case : List[str] = self.dummy_cond_unet else: __snake_case : List[Any] = self.dummy_uncond_unet # Default to CM multistep sampler __snake_case : str = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __snake_case : Any = { '''unet''': unet, '''scheduler''': scheduler, } return components def SCREAMING_SNAKE_CASE (self , a_ , a_=0 ): '''simple docstring''' if str(lowerCAmelCase__ ).startswith('''mps''' ): __snake_case : Tuple = torch.manual_seed(lowerCAmelCase__ ) else: __snake_case : str = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) __snake_case : int = { '''batch_size''': 1, '''num_inference_steps''': None, '''timesteps''': [22, 0], '''generator''': generator, '''output_type''': '''np''', } return inputs def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator __snake_case : str = self.get_dummy_components() __snake_case : str = ConsistencyModelPipeline(**lowerCAmelCase__ ) __snake_case : Optional[int] = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __snake_case : Dict = self.get_dummy_inputs(lowerCAmelCase__ ) __snake_case : str = pipe(**lowerCAmelCase__ ).images assert image.shape == (1, 32, 32, 3) __snake_case : List[Any] = image[0, -3:, -3:, -1] __snake_case : str = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator __snake_case : List[str] = self.get_dummy_components(class_cond=lowerCAmelCase__ ) __snake_case : List[str] = ConsistencyModelPipeline(**lowerCAmelCase__ ) __snake_case : Any = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __snake_case : Optional[Any] = 
self.get_dummy_inputs(lowerCAmelCase__ ) __snake_case : Union[str, Any] = 0 __snake_case : Union[str, Any] = pipe(**lowerCAmelCase__ ).images assert image.shape == (1, 32, 32, 3) __snake_case : Optional[int] = image[0, -3:, -3:, -1] __snake_case : Tuple = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator __snake_case : str = self.get_dummy_components() __snake_case : str = ConsistencyModelPipeline(**lowerCAmelCase__ ) __snake_case : List[Any] = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __snake_case : str = self.get_dummy_inputs(lowerCAmelCase__ ) __snake_case : Optional[Any] = 1 __snake_case : Dict = None __snake_case : int = pipe(**lowerCAmelCase__ ).images assert image.shape == (1, 32, 32, 3) __snake_case : Optional[int] = image[0, -3:, -3:, -1] __snake_case : str = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator __snake_case : str = self.get_dummy_components(class_cond=lowerCAmelCase__ ) __snake_case : Union[str, Any] = ConsistencyModelPipeline(**lowerCAmelCase__ ) __snake_case : List[Any] = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __snake_case : int = self.get_dummy_inputs(lowerCAmelCase__ ) __snake_case : int = 1 __snake_case : int = None __snake_case : List[str] = 0 __snake_case : str = pipe(**lowerCAmelCase__ ).images assert image.shape == (1, 32, 32, 3) __snake_case : List[str] = image[0, -3:, -3:, -1] __snake_case : List[Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE (self , a_=0 , a_=False , a_="cpu" , a_=torch.floataa , a_=(1, 3, 64, 64) ): '''simple docstring''' __snake_case : List[Any] = torch.manual_seed(lowerCAmelCase__ ) __snake_case : int = { '''num_inference_steps''': None, '''timesteps''': [22, 0], '''class_labels''': 0, '''generator''': generator, '''output_type''': '''np''', } if get_fixed_latents: __snake_case : Tuple = self.get_fixed_latents(seed=lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=lowerCAmelCase__ , shape=lowerCAmelCase__ ) __snake_case : Any = latents return inputs def SCREAMING_SNAKE_CASE (self , a_=0 , a_="cpu" , a_=torch.floataa , a_=(1, 3, 64, 64) ): '''simple docstring''' if type(lowerCAmelCase__ ) == str: __snake_case : str = torch.device(lowerCAmelCase__ ) __snake_case : Optional[Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) __snake_case : Optional[int] = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ) return latents def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __snake_case : Union[str, 
Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __snake_case : List[str] = ConsistencyModelPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ ) pipe.to(torch_device=lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __snake_case : Optional[int] = self.get_inputs() __snake_case : Optional[int] = pipe(**lowerCAmelCase__ ).images assert image.shape == (1, 64, 64, 3) __snake_case : Optional[Any] = image[0, -3:, -3:, -1] __snake_case : Optional[Any] = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __snake_case : Tuple = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __snake_case : str = ConsistencyModelPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ ) pipe.to(torch_device=lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __snake_case : str = self.get_inputs() __snake_case : Optional[int] = 1 __snake_case : List[Any] = None __snake_case : List[Any] = pipe(**lowerCAmelCase__ ).images assert image.shape == (1, 64, 64, 3) __snake_case : List[str] = image[0, -3:, -3:, -1] __snake_case : Tuple = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 @require_torch_a def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __snake_case : str = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __snake_case : Union[str, Any] = ConsistencyModelPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ ) pipe.to(torch_device=lowerCAmelCase__ , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __snake_case : Optional[int] = self.get_inputs(get_fixed_latents=lowerCAmelCase__ , device=lowerCAmelCase__ ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=lowerCAmelCase__ , enable_math=lowerCAmelCase__ , enable_mem_efficient=lowerCAmelCase__ ): __snake_case : Any = pipe(**lowerCAmelCase__ ).images assert image.shape == (1, 64, 64, 3) __snake_case : Any = image[0, -3:, -3:, -1] __snake_case : Union[str, Any] = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @require_torch_a def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __snake_case : Any = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __snake_case : Union[str, Any] = ConsistencyModelPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ ) pipe.to(torch_device=lowerCAmelCase__ , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __snake_case : Dict = self.get_inputs(get_fixed_latents=lowerCAmelCase__ , device=lowerCAmelCase__ ) __snake_case : List[str] = 1 __snake_case : Optional[Any] = None # Ensure usage of flash attention in torch 2.0 with 
sdp_kernel(enable_flash=lowerCAmelCase__ , enable_math=lowerCAmelCase__ , enable_mem_efficient=lowerCAmelCase__ ): __snake_case : Any = pipe(**lowerCAmelCase__ ).images assert image.shape == (1, 64, 64, 3) __snake_case : Tuple = image[0, -3:, -3:, -1] __snake_case : int = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
365
"""simple docstring""" import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__) @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None if is_torch_available(): import torch from torch.utils.data import Dataset class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ): '''simple docstring''' __snake_case : Any = hans_processors[task]() __snake_case : int = os.path.join( a_ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , ) __snake_case : Tuple = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Dict = label_list[2], label_list[1] __snake_case : Any = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __snake_case : int = cached_features_file + '''.lock''' with FileLock(a_ ): if os.path.exists(a_ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __snake_case : Union[str, Any] = torch.load(a_ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __snake_case : Dict = ( processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) ) logger.info('''Training examples: %s''' , len(a_ ) ) __snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) logger.info('''Saving features into cached file %s''' , a_ ) torch.save(self.features , a_ ) def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list if is_tf_available(): import tensorflow as tf class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = 1_28 , a_=False , a_ = False , ): '''simple docstring''' __snake_case : List[Any] = hans_processors[task]() __snake_case : str = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Tuple = label_list[2], label_list[1] __snake_case : Dict = label_list __snake_case : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) __snake_case : Dict = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_00_00 == 0: 
logger.info('''Writing example %d of %d''' % (ex_index, len(a_ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __snake_case : Union[str, Any] = tf.data.Dataset.from_generator( a_ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.dataset def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return ["contradiction", "entailment", "neutral"] def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = [] for i, line in enumerate(a_ ): if i == 0: continue __snake_case : Tuple = '''%s-%s''' % (set_type, line[0]) __snake_case : Dict = line[5] __snake_case : int = line[6] __snake_case : Dict = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __snake_case : List[Any] = line[0] examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) ) return examples def lowercase ( _snake_case : List[InputExample] , _snake_case : List[str] , _snake_case : int , _snake_case : PreTrainedTokenizer , ) ->List[str]: """simple docstring""" __snake_case : Optional[int] = {label: i for i, label in enumerate(_snake_case )} __snake_case : Tuple = [] for ex_index, example in tqdm.tqdm(enumerate(_snake_case ) , desc='''convert examples to features''' ): if ex_index % 10_000 == 0: logger.info('''Writing example %d''' % (ex_index) ) __snake_case : List[Any] = tokenizer( example.text_a , example.text_b , add_special_tokens=_snake_case , max_length=_snake_case , padding='''max_length''' , truncation=_snake_case , return_overflowing_tokens=_snake_case , ) __snake_case : List[Any] = label_map[example.label] if example.label in label_map else 0 __snake_case : Union[str, Any] = int(example.pairID ) features.append(InputFeatures(**_snake_case , label=_snake_case , pairID=_snake_case ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features SCREAMING_SNAKE_CASE : Dict = { """hans""": 3, } SCREAMING_SNAKE_CASE : str = { """hans""": HansProcessor, }
24
0
"""simple docstring""" def lowercase ( _snake_case : int = 10**12 ) ->int: """simple docstring""" __snake_case : Optional[Any] = 1 __snake_case : int = 0 __snake_case : List[Any] = 1 __snake_case : int = 1 while numerator <= 2 * min_total - 1: prev_numerator += 2 * numerator numerator += 2 * prev_numerator prev_denominator += 2 * denominator denominator += 2 * prev_denominator return (denominator + 1) // 2 if __name__ == "__main__": print(F'{solution() = }')
366
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[str] = { """tanreinama/GPTSAN-2.8B-spout_is_uniform""": ( """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json""" ), } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='gptsan-japanese' lowerCamelCase__ =[ 'past_key_values', ] lowerCamelCase__ ={ 'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__(self , a_=3_60_00 , a_=12_80 , a_=10_24 , a_=81_92 , a_=40_96 , a_=1_28 , a_=10 , a_=0 , a_=16 , a_=16 , a_=1_28 , a_=0.0 , a_=1E-5 , a_=False , a_=0.0 , a_="float32" , a_=False , a_=False , a_=False , a_=0.002 , a_=False , a_=True , a_=3_59_98 , a_=3_59_95 , a_=3_59_99 , **a_ , ): '''simple docstring''' __snake_case : Any = vocab_size __snake_case : str = max_position_embeddings __snake_case : Any = d_model __snake_case : List[str] = d_ff __snake_case : Dict = d_ext __snake_case : Optional[Any] = d_spout __snake_case : int = num_switch_layers __snake_case : List[Any] = num_ext_layers __snake_case : Any = num_switch_layers + num_ext_layers __snake_case : Optional[int] = num_heads __snake_case : Tuple = num_experts __snake_case : List[Any] = expert_capacity __snake_case : Dict = dropout_rate __snake_case : Optional[Any] = layer_norm_epsilon __snake_case : Dict = router_bias __snake_case : str = router_jitter_noise __snake_case : List[str] = router_dtype __snake_case : Union[str, Any] = router_ignore_padding_tokens __snake_case : List[str] = output_hidden_states __snake_case : Optional[Any] = output_attentions __snake_case : Any = initializer_factor __snake_case : int = output_router_logits __snake_case : Union[str, Any] = use_cache super().__init__( separator_token_id=a_ , pad_token_id=a_ , eos_token_id=a_ , **a_ , )
24
0
"""simple docstring""" import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py SCREAMING_SNAKE_CASE : Optional[int] = """src/transformers""" SCREAMING_SNAKE_CASE : Tuple = """docs/source/en""" SCREAMING_SNAKE_CASE : List[Any] = """.""" def lowercase ( _snake_case : List[str] , _snake_case : Any , _snake_case : Tuple ) ->List[str]: """simple docstring""" with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __snake_case : Optional[Any] = f.readlines() # Find the start prompt. __snake_case : Any = 0 while not lines[start_index].startswith(__lowerCamelCase ): start_index += 1 start_index += 1 __snake_case : Union[str, Any] = start_index while not lines[end_index].startswith(__lowerCamelCase ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | SCREAMING_SNAKE_CASE : int = """Model|Encoder|Decoder|ForConditionalGeneration""" # Regexes that match TF/Flax/PT model names. SCREAMING_SNAKE_CASE : List[Any] = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") SCREAMING_SNAKE_CASE : Optional[int] = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. SCREAMING_SNAKE_CASE : str = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # This is to make sure the transformers module imported is the one in the repo. SCREAMING_SNAKE_CASE : Any = direct_transformers_import(TRANSFORMERS_PATH) def lowercase ( _snake_case : str ) ->Any: """simple docstring""" __snake_case : Any = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , __lowerCamelCase ) return [m.group(0 ) for m in matches] def lowercase ( _snake_case : Tuple , _snake_case : int ) ->Optional[int]: """simple docstring""" __snake_case : Tuple = 2 if text == '''✅''' or text == '''❌''' else len(__lowerCamelCase ) __snake_case : Tuple = (width - text_length) // 2 __snake_case : str = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowercase ( ) ->Tuple: """simple docstring""" __snake_case : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __snake_case : Optional[int] = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } __snake_case : Optional[Any] = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. __snake_case : Optional[Any] = collections.defaultdict(__lowerCamelCase ) __snake_case : Optional[int] = collections.defaultdict(__lowerCamelCase ) __snake_case : Optional[int] = collections.defaultdict(__lowerCamelCase ) __snake_case : Any = collections.defaultdict(__lowerCamelCase ) __snake_case : List[Any] = collections.defaultdict(__lowerCamelCase ) # Let's lookup through all transformers object (once). 
for attr_name in dir(__lowerCamelCase ): __snake_case : Union[str, Any] = None if attr_name.endswith('''Tokenizer''' ): __snake_case : Optional[Any] = slow_tokenizers __snake_case : str = attr_name[:-9] elif attr_name.endswith('''TokenizerFast''' ): __snake_case : Optional[Any] = fast_tokenizers __snake_case : int = attr_name[:-13] elif _re_tf_models.match(__lowerCamelCase ) is not None: __snake_case : Optional[int] = tf_models __snake_case : Dict = _re_tf_models.match(__lowerCamelCase ).groups()[0] elif _re_flax_models.match(__lowerCamelCase ) is not None: __snake_case : int = flax_models __snake_case : Optional[int] = _re_flax_models.match(__lowerCamelCase ).groups()[0] elif _re_pt_models.match(__lowerCamelCase ) is not None: __snake_case : Any = pt_models __snake_case : Any = _re_pt_models.match(__lowerCamelCase ).groups()[0] if lookup_dict is not None: while len(__lowerCamelCase ) > 0: if attr_name in model_name_to_prefix.values(): __snake_case : Optional[Any] = True break # Try again after removing the last word in the name __snake_case : Dict = ''''''.join(camel_case_split(__lowerCamelCase )[:-1] ) # Let's build that table! __snake_case : Optional[int] = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) __snake_case : Optional[Any] = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support'''] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). __snake_case : Tuple = [len(__lowerCamelCase ) + 2 for c in columns] __snake_case : int = max([len(__lowerCamelCase ) for name in model_names] ) + 2 # Build the table per se __snake_case : Dict = '''|''' + '''|'''.join([_center_text(__lowerCamelCase , __lowerCamelCase ) for c, w in zip(__lowerCamelCase , __lowerCamelCase )] ) + '''|\n''' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n" __snake_case : Tuple = {True: '''✅''', False: '''❌'''} for name in model_names: __snake_case : List[str] = model_name_to_prefix[name] __snake_case : int = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(__lowerCamelCase , __lowerCamelCase ) for l, w in zip(__lowerCamelCase , __lowerCamelCase )] ) + "|\n" return table def lowercase ( _snake_case : Union[str, Any]=False ) ->Any: """simple docstring""" __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = _find_text_in_file( filename=os.path.join(__lowerCamelCase , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , ) __snake_case : List[Any] = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(__lowerCamelCase , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() check_model_table(args.fix_and_overwrite)
367
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """adapter_layer""": """encoder.layers.*.adapter_layer""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", """pooling_layer.linear""": """projector""", """pooling_layer.projection""": """classifier""", } SCREAMING_SNAKE_CASE : int = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """projector""", """classifier""", ] def lowercase ( _snake_case : Optional[int] ) ->int: """simple docstring""" __snake_case : int = {} with open(_snake_case , '''r''' ) as file: for line_number, line in enumerate(_snake_case ): __snake_case : Union[str, Any] = line.strip() if line: __snake_case : str = line.split() __snake_case : Union[str, Any] = line_number __snake_case : Dict = words[0] __snake_case : str = value return result def lowercase ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Any , _snake_case : List[str] ) ->List[str]: """simple docstring""" for attribute in key.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : str = '''param''' if weight_type is not None and weight_type != "param": __snake_case : Union[str, Any] = getattr(_snake_case , _snake_case ).shape elif weight_type is not None and weight_type == "param": __snake_case : Optional[Any] = hf_pointer for attribute in hf_param_name.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : List[str] = shape_pointer.shape # let's reduce dimension __snake_case : int = value[0] else: __snake_case : int = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __snake_case : List[Any] = value elif weight_type == "weight_g": __snake_case : Tuple = value elif weight_type == "weight_v": __snake_case : str = value elif weight_type == "bias": __snake_case : str = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __snake_case : List[Any] = getattr(_snake_case , _snake_case ) __snake_case : int = value else: __snake_case : List[Any] = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : int ) ->int: """simple docstring""" __snake_case : Optional[Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : Dict = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : List[str] = '''param''' if weight_type is not None and weight_type != "param": __snake_case : str = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __snake_case : Tuple = '''.'''.join([key, hf_param_name] ) else: __snake_case : Optional[int] = key __snake_case : List[Any] = value if '''lm_head''' in full_key else value[0] SCREAMING_SNAKE_CASE : Tuple = { """W_a""": """linear_1.weight""", """W_b""": """linear_2.weight""", """b_a""": """linear_1.bias""", """b_b""": """linear_2.bias""", """ln_W""": """norm.weight""", """ln_b""": """norm.bias""", } def lowercase ( _snake_case : str , _snake_case : List[Any] , _snake_case : Tuple=None , _snake_case : int=None ) ->Dict: """simple docstring""" __snake_case : Tuple = False for key, mapped_key in MAPPING.items(): __snake_case : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __snake_case : int = True if "*" in mapped_key: __snake_case : List[Any] = name.split(_snake_case )[0].split('''.''' )[-2] __snake_case : Tuple = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: __snake_case : Union[str, Any] = '''weight_g''' elif "weight_v" in name: __snake_case : List[str] = '''weight_v''' elif "bias" in name: __snake_case : Any = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __snake_case : List[Any] = '''weight''' else: __snake_case : Union[str, Any] = None if hf_dict is not None: rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) else: set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) return is_used return is_used def lowercase ( _snake_case : str , _snake_case : Dict , _snake_case : List[str] ) ->Any: """simple docstring""" __snake_case : Union[str, Any] = [] __snake_case : Union[str, Any] = fairseq_model.state_dict() __snake_case : str = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __snake_case : str = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) __snake_case : Union[str, Any] = True else: __snake_case : Optional[Any] = load_wavaveca_layer(_snake_case , _snake_case , _snake_case ) if not is_used: unused_weights.append(_snake_case ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowercase ( _snake_case : Any , 
_snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : List[str] ) ->Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = full_name.split('''conv_layers.''' )[-1] __snake_case : str = name.split('''.''' ) __snake_case : Optional[int] = int(items[0] ) __snake_case : Any = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __snake_case : int = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __snake_case : List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_snake_case ) @torch.no_grad() def lowercase ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any=None , _snake_case : str=None , _snake_case : List[Any]=True , _snake_case : int=False ) ->Dict: """simple docstring""" if config_path is not None: __snake_case : Optional[Any] = WavaVecaConfig.from_pretrained(_snake_case ) else: __snake_case : Tuple = WavaVecaConfig() if is_seq_class: __snake_case : Optional[int] = read_txt_into_dict(_snake_case ) __snake_case : List[Any] = idalabel __snake_case : int = WavaVecaForSequenceClassification(_snake_case ) __snake_case : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) feature_extractor.save_pretrained(_snake_case ) elif is_finetuned: if dict_path: __snake_case : int = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : Tuple = target_dict.pad_index __snake_case : int = target_dict.bos_index __snake_case : Tuple = target_dict.eos_index __snake_case : Optional[Any] = len(target_dict.symbols ) __snake_case : Any = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) __snake_case : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched __snake_case : Dict = 0 __snake_case : 
List[Any] = 1 with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(_snake_case , _snake_case ) __snake_case : List[Any] = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , ) __snake_case : Tuple = True if config.feat_extract_norm == '''layer''' else False __snake_case : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) __snake_case : Tuple = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) __snake_case : Optional[int] = WavaVecaForCTC(_snake_case ) else: __snake_case : Tuple = WavaVecaForPreTraining(_snake_case ) if is_finetuned or is_seq_class: __snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __snake_case : Dict = argparse.Namespace(task='''audio_pretraining''' ) __snake_case : Optional[int] = fairseq.tasks.setup_task(_snake_case ) __snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case ) __snake_case : int = model[0].eval() recursively_load_weights(_snake_case , _snake_case , not is_finetuned ) hf_wavavec.save_pretrained(_snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) parser.add_argument( """--is_seq_class""", action="""store_true""", help="""Whether the model to convert is a fine-tuned sequence classification model or not""", ) SCREAMING_SNAKE_CASE : Any = parser.parse_args() SCREAMING_SNAKE_CASE : Tuple = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
24
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} SCREAMING_SNAKE_CASE : Any = { "vocab_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json", "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json" ), }, "merges_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt", "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt" ), }, "tokenizer_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json", "roberta-base-openai-detector": ( "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json" ), "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json" ), }, } SCREAMING_SNAKE_CASE : str = { "roberta-base": 512, "roberta-large": 512, "roberta-large-mnli": 512, "distilroberta-base": 512, "roberta-base-openai-detector": 512, "roberta-large-openai-detector": 512, } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =VOCAB_FILES_NAMES lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ =['input_ids', 'attention_mask'] lowerCamelCase__ =RobertaTokenizer def __init__(self , a_=None , a_=None , a_=None , a_="replace" , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=False , a_=True , **a_ , ): '''simple docstring''' super().__init__( _a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , ) __snake_case : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space: __snake_case : Any = getattr(_a , pre_tok_state.pop('''type''' ) ) __snake_case : str = add_prefix_space __snake_case : 
List[Any] = pre_tok_class(**_a ) __snake_case : List[Any] = add_prefix_space __snake_case : List[str] = "post_processor" __snake_case : List[Any] = getattr(self.backend_tokenizer , _a , _a ) if tokenizer_component_instance: __snake_case : Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __snake_case : Union[str, Any] = tuple(state['''sep'''] ) if "cls" in state: __snake_case : Any = tuple(state['''cls'''] ) __snake_case : Dict = False if state.get('''add_prefix_space''' , _a ) != add_prefix_space: __snake_case : str = add_prefix_space __snake_case : int = True if state.get('''trim_offsets''' , _a ) != trim_offsets: __snake_case : Dict = trim_offsets __snake_case : Optional[int] = True if changes_to_apply: __snake_case : List[str] = getattr(_a , state.pop('''type''' ) ) __snake_case : List[Any] = component_class(**_a ) setattr(self.backend_tokenizer , _a , _a ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : List[str] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value __snake_case : List[str] = value def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' __snake_case : Any = kwargs.get('''is_split_into_words''' , _a ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*_a , **_a ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' __snake_case : List[str] = kwargs.get('''is_split_into_words''' , _a ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*_a , **_a ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' __snake_case : Optional[int] = self._tokenizer.model.save(_a , name=_a ) return tuple(_a ) def SCREAMING_SNAKE_CASE (self , a_ , a_=None ): '''simple docstring''' __snake_case : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' __snake_case : Optional[int] = [self.sep_token_id] __snake_case : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
368
"""simple docstring""" from ..utils import DummyObject, requires_backends class _UpperCAmelCase ( metaclass=__snake_case ): '''simple docstring''' lowerCamelCase__ =['transformers', 'torch', 'note_seq'] def __init__(self , *a_ , **a_ ): '''simple docstring''' requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
24
0
"""simple docstring""" from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase ( _a ): '''simple docstring''' lowerCamelCase__ ='new-model' if is_tf_available(): class _UpperCAmelCase ( _a ): '''simple docstring''' lowerCamelCase__ =NewModelConfig @require_tf class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = "bert-base-cased" __snake_case : Any = AutoConfig.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) __snake_case : Optional[int] = TFAutoModel.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = "bert-base-cased" __snake_case : Optional[int] = AutoConfig.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) __snake_case : Union[str, Any] = TFAutoModelForPreTraining.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Union[str, Any] = AutoConfig.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) __snake_case : Any = TFAutoModelForCausalLM.from_pretrained(_a ) __snake_case : List[str] = TFAutoModelForCausalLM.from_pretrained(_a , output_loading_info=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Optional[int] = AutoConfig.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) __snake_case : List[Any] = TFAutoModelWithLMHead.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple 
docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : str = AutoConfig.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) __snake_case : int = TFAutoModelForMaskedLM.from_pretrained(_a ) __snake_case : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(_a , output_loading_info=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Optional[int] = AutoConfig.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) __snake_case : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(_a ) __snake_case : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(_a , output_loading_info=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in ["bert-base-uncased"]: __snake_case : int = AutoConfig.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) __snake_case : Optional[int] = TFAutoModelForSequenceClassification.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in ["bert-base-uncased"]: __snake_case : Dict = AutoConfig.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) __snake_case : Union[str, Any] = TFAutoModelForQuestionAnswering.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) @slow @require_tensorflow_probability def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: __snake_case : int = AutoConfig.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) __snake_case : Tuple = TFAutoModelForTableQuestionAnswering.from_pretrained(_a ) __snake_case : Optional[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained( _a , output_loading_info=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = TFAutoModelWithLMHead.from_pretrained(_a ) self.assertIsInstance(_a , _a ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_a ) , 1_44_10 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(_a ) self.assertIsInstance(_a , _a ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_a ) , 1_44_10 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' ) self.assertIsInstance(_a , _a ) __snake_case : str = copy.deepcopy(model.config ) __snake_case : Optional[int] = ["FunnelBaseModel"] __snake_case : Any = TFAutoModel.from_config(_a ) self.assertIsInstance(_a , _a ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_a ) __snake_case : Any = TFAutoModel.from_pretrained(_a ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' try: AutoConfig.register('''new-model''' , _a ) __snake_case : Dict = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in 
auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(_a ): auto_class.register(_a , _a ) auto_class.register(_a , _a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_a ): auto_class.register(_a , _a ) # Now that the config is registered, it can be used as any other config with the auto-API __snake_case : str = BertModelTester(self ).get_config() __snake_case : Optional[int] = NewModelConfig(**tiny_config.to_dict() ) __snake_case : Optional[int] = auto_class.from_config(_a ) self.assertIsInstance(_a , _a ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_a ) __snake_case : List[Any] = auto_class.from_pretrained(_a ) self.assertIsInstance(_a , _a ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' with self.assertRaisesRegex( _a , '''bert-base is not a local folder and is not a valid model identifier''' ): __snake_case : Optional[Any] = TFAutoModel.from_pretrained('''bert-base''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' with self.assertRaisesRegex( _a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): __snake_case : str = TFAutoModel.from_pretrained(_a , revision='''aaaaaa''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' with self.assertRaisesRegex( _a , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ): __snake_case : int = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' with self.assertRaisesRegex(_a , '''Use `from_pt=True` to load this model''' ): __snake_case : List[str] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: __snake_case : List[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint __snake_case : Union[str, Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) with RequestCounter() as counter: __snake_case : Tuple = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
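# The registration test above exercises the custom-model auto API. A minimal
# sketch of that flow outside the test harness (NewModelConfig is illustrative;
# TFNewModel stands in for your own tf.keras model class):
from transformers import AutoConfig, BertConfig, TFAutoModel


class NewModelConfig(BertConfig):
    model_type = "new-model"


AutoConfig.register("new-model", NewModelConfig)      # bind the config type first
# TFAutoModel.register(NewModelConfig, TFNewModel)    # then bind a model class to it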
369
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , ): '''simple docstring''' __snake_case : List[Any] = size if size is not None else {'''shortest_edge''': 20} __snake_case : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __snake_case : Tuple = parent __snake_case : Tuple = batch_size __snake_case : Tuple = num_channels __snake_case : List[str] = image_size __snake_case : Optional[Any] = min_resolution __snake_case : List[Any] = max_resolution __snake_case : List[Any] = do_resize __snake_case : Dict = size __snake_case : Dict = do_center_crop __snake_case : Dict = crop_size __snake_case : str = do_flip_channel_order def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class _UpperCAmelCase ( __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = MobileViTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , '''do_resize''' ) ) self.assertTrue(hasattr(a_ , '''size''' ) ) self.assertTrue(hasattr(a_ , '''do_center_crop''' ) ) self.assertTrue(hasattr(a_ , '''center_crop''' ) ) self.assertTrue(hasattr(a_ , '''do_flip_channel_order''' ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], 
self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : str = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input __snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Union[str, Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input __snake_case : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
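# A minimal usage sketch for the image processor exercised above (the blank
# test image and the printed shape are my own illustration, not from the source):
from PIL import Image
from transformers import MobileViTImageProcessor

image_processor = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
pixel_values = image_processor(Image.new("RGB", (400, 30)), return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])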
24
0
"""simple docstring""" from torch import nn def lowercase ( _snake_case : Tuple ) ->List[str]: """simple docstring""" if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(f"""Unsupported activation function: {act_fn}""" )
370
"""simple docstring""" import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def lowercase ( ) ->Optional[int]: """simple docstring""" __snake_case : int = torch.nn.Linear(2 , 4 ) __snake_case : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 ) __snake_case : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 ) __snake_case : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) __snake_case : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def lowercase ( _snake_case : str ) ->Optional[Any]: """simple docstring""" return (model.weight.abs().sum() + model.bias.abs().sum()).item() def lowercase ( _snake_case : Union[str, Any] ) ->Tuple: """simple docstring""" __snake_case : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(_snake_case ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(a_ ): __snake_case : Any = Accelerator(cpu=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case : Optional[int] = GradientState() assert state.num_steps == 1 __snake_case : str = 4 assert state.num_steps == 4 assert state.sync_gradients is True __snake_case : List[Any] = False assert state.sync_gradients is False GradientState._reset_state() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*a_ , **a_ ): pass with patch('''torch.cuda.set_device''' , a_ ), 
patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ): __snake_case : List[Any] = Accelerator() self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : Any = get_signature(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # make sure loaded weights match accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : List[Any] = get_signature(a_ ) # saving hook def save_config(a_ , a_ , a_ ): __snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__} with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f: json.dump(a_ , a_ ) # loading hook def load_config(a_ , a_ ): with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f: __snake_case : Any = json.load(a_ ) __snake_case : List[str] = config['''class_name'''] __snake_case : str = accelerator.register_save_state_pre_hook(a_ ) __snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Any = '''random''' # make sure loaded weights match with hooks accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks removed load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Union[str, Any] = '''random''' # make sure loaded weights match with hooks removed accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components() __snake_case : Union[str, Any] = None # This should work __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertTrue(dummy_obj is None ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() __snake_case : Optional[int] = [1, 2, 3] # This should work __snake_case , __snake_case , 
__snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map={'''''': 0} , ) __snake_case : Optional[Any] = Accelerator() # This should work __snake_case : Any = accelerator.prepare(a_ ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Any = Accelerator() with init_empty_weights(): __snake_case : List[str] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : Union[str, Any] = infer_auto_device_map(a_ ) __snake_case : str = '''cpu''' __snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ ) # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Dict = accelerator.prepare(a_ ) @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU} with init_empty_weights(): __snake_case : Any = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : List[Any] = infer_auto_device_map(a_ ) __snake_case : Dict = 1 __snake_case : str = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Any = Accelerator() # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Tuple = accelerator.prepare(a_ ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM with init_empty_weights(): __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) __snake_case : Tuple = infer_auto_device_map(a_ ) __snake_case : Tuple = 1 __snake_case : List[Any] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Tuple = Accelerator() # This should work __snake_case : Dict = accelerator.prepare(a_ ) @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = torch.nn.Linear(10 , 10 ) __snake_case 
: List[str] = torch.optim.SGD(model.parameters() , lr=0.01 ) __snake_case : Optional[Any] = Accelerator(cpu=a_ ) __snake_case : str = accelerator.prepare(a_ )
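# The tests above center on Accelerator.prepare and state checkpointing. A
# minimal sketch of that lifecycle (the toy model/optimizer mirror the
# create_components() fixtures above):
import tempfile

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
model, optimizer = accelerator.prepare(model, optimizer)

with tempfile.TemporaryDirectory() as ckpt_dir:
    accelerator.save_state(ckpt_dir)   # saves model, optimizer and RNG state
    accelerator.load_state(ckpt_dir)   # restores them in place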
24
0
"""simple docstring""" from math import factorial class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_ ): '''simple docstring''' __snake_case : Optional[int] = real if isinstance(__lowercase , __lowercase ): __snake_case : str = [1] * rank else: __snake_case : Union[str, Any] = rank def __repr__(self ): '''simple docstring''' return ( f"""{self.real}+""" f"""{'+'.join(str(__lowercase )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}""" ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = self.duals.copy() while cur[-1] == 0: cur.pop(-1 ) return Dual(self.real , __lowercase ) def __add__(self , a_ ): '''simple docstring''' if not isinstance(__lowercase , __lowercase ): return Dual(self.real + other , self.duals ) __snake_case : int = self.duals.copy() __snake_case : List[Any] = other.duals.copy() if len(__lowercase ) > len(__lowercase ): o_dual.extend([1] * (len(__lowercase ) - len(__lowercase )) ) elif len(__lowercase ) < len(__lowercase ): s_dual.extend([1] * (len(__lowercase ) - len(__lowercase )) ) __snake_case : List[str] = [] for i in range(len(__lowercase ) ): new_duals.append(s_dual[i] + o_dual[i] ) return Dual(self.real + other.real , __lowercase ) lowerCamelCase__ =__add__ def __sub__(self , a_ ): '''simple docstring''' return self + other * -1 def __mul__(self , a_ ): '''simple docstring''' if not isinstance(__lowercase , __lowercase ): __snake_case : Dict = [] for i in self.duals: new_duals.append(i * other ) return Dual(self.real * other , __lowercase ) __snake_case : List[Any] = [0] * (len(self.duals ) + len(other.duals ) + 1) for i, item in enumerate(self.duals ): for j, jtem in enumerate(other.duals ): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals ) ): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals ) ): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real , __lowercase ) lowerCamelCase__ =__mul__ def __truediv__(self , a_ ): '''simple docstring''' if not isinstance(__lowercase , __lowercase ): __snake_case : Tuple = [] for i in self.duals: new_duals.append(i / other ) return Dual(self.real / other , __lowercase ) raise ValueError def __floordiv__(self , a_ ): '''simple docstring''' if not isinstance(__lowercase , __lowercase ): __snake_case : List[Any] = [] for i in self.duals: new_duals.append(i // other ) return Dual(self.real // other , __lowercase ) raise ValueError def __pow__(self , a_ ): '''simple docstring''' if n < 0 or isinstance(__lowercase , __lowercase ): raise ValueError('''power must be a positive integer''' ) if n == 0: return 1 if n == 1: return self __snake_case : str = self for _ in range(n - 1 ): x *= self return x def lowercase ( _snake_case : Dict , _snake_case : List[Any] , _snake_case : Tuple ) ->Dict: """simple docstring""" if not callable(__lowerCAmelCase ): raise ValueError('''differentiate() requires a function as input for func''' ) if not isinstance(__lowerCAmelCase , (float, int) ): raise ValueError('''differentiate() requires a float as input for position''' ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError('''differentiate() requires an int as input for order''' ) __snake_case : Any = Dual(__lowerCAmelCase , 1 ) __snake_case : Any = func(__lowerCAmelCase ) if order == 0: return result.real return result.duals[order - 1] * factorial(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() def lowercase ( _snake_case : Optional[Any] ) ->List[Any]: """simple 
docstring""" return y**2 * y**4 print(differentiate(f, 9, 2))
371
"""simple docstring""" def lowercase ( _snake_case : int ) ->str: """simple docstring""" if number > 0: raise ValueError('''input must be a negative integer''' ) __snake_case : Any = len(bin(_snake_case )[3:] ) __snake_case : List[Any] = bin(abs(_snake_case ) - (1 << binary_number_length) )[3:] __snake_case : Dict = ( ( '''1''' + '''0''' * (binary_number_length - len(_snake_case )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
24
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__(self , a_ , a_=7 , a_=3 , a_=10 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=[0.5, 0.5, 0.5] , a_=[0.5, 0.5, 0.5] , a_=None , ): '''simple docstring''' __snake_case : List[str] = size if size is not None else {'shortest_edge': 18} __snake_case : Union[str, Any] = crop_size if crop_size is not None else {'height': 18, 'width': 18} __snake_case : List[str] = parent __snake_case : Dict = batch_size __snake_case : Tuple = num_channels __snake_case : Optional[Any] = num_frames __snake_case : List[Any] = image_size __snake_case : Optional[int] = min_resolution __snake_case : Dict = max_resolution __snake_case : Dict = do_resize __snake_case : Any = size __snake_case : int = do_normalize __snake_case : Optional[int] = image_mean __snake_case : List[str] = image_std __snake_case : Optional[int] = crop_size def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class _UpperCAmelCase ( __UpperCamelCase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =VivitImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = VivitImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , '''image_mean''' ) ) self.assertTrue(hasattr(a_ , '''image_std''' ) ) self.assertTrue(hasattr(a_ , '''do_normalize''' ) ) self.assertTrue(hasattr(a_ , '''do_resize''' ) ) self.assertTrue(hasattr(a_ , '''do_center_crop''' ) ) self.assertTrue(hasattr(a_ , '''size''' ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos __snake_case : Optional[int] = prepare_video_inputs(self.image_processor_tester , equal_resolution=a_ ) for video in video_inputs: self.assertIsInstance(a_ , a_ ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input __snake_case : int = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( 
encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Any = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for video in video_inputs: self.assertIsInstance(a_ , a_ ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input __snake_case : Optional[int] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : List[Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case : int = prepare_video_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for video in video_inputs: self.assertIsInstance(a_ , a_ ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input __snake_case : Tuple = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : str = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
350
"""simple docstring""" def lowercase ( ) ->int: """simple docstring""" return [ a * b * (1_000 - a - b) for a in range(1 , 999 ) for b in range(_snake_case , 999 ) if (a * a + b * b == (1_000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'{solution() = }')
24
0
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = tempfile.mkdtemp() # fmt: off __snake_case : List[Any] = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __snake_case : List[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) ) __snake_case : List[str] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __snake_case : str = {'''unk_token''': '''<unk>'''} __snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) ) __snake_case : Any = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } __snake_case : List[str] = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE (self , **a_ ): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE (self , **a_ ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE (self , **a_ ): '''simple docstring''' return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __snake_case : str = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs] return image_inputs def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.get_tokenizer() __snake_case : Optional[Any] = self.get_rust_tokenizer() __snake_case : Optional[int] = self.get_image_processor() __snake_case : Optional[int] = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE ) processor_slow.save_pretrained(self.tmpdirname ) __snake_case : Union[str, Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE ) __snake_case : Optional[Any] = 
CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE ) processor_fast.save_pretrained(self.tmpdirname ) __snake_case : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __SCREAMING_SNAKE_CASE ) self.assertIsInstance(processor_fast.tokenizer , __SCREAMING_SNAKE_CASE ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __SCREAMING_SNAKE_CASE ) self.assertIsInstance(processor_fast.image_processor , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __snake_case : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __snake_case : Union[str, Any] = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 ) __snake_case : Any = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = self.get_image_processor() __snake_case : Union[str, Any] = self.get_tokenizer() __snake_case : str = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE ) __snake_case : Union[str, Any] = self.prepare_image_inputs() __snake_case : Dict = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' ) __snake_case : Optional[Any] = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.get_image_processor() __snake_case : Optional[int] = self.get_tokenizer() __snake_case : Dict = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE ) __snake_case : int = '''lower newer''' __snake_case : Optional[int] = processor(text=__SCREAMING_SNAKE_CASE ) __snake_case : str = tokenizer(__SCREAMING_SNAKE_CASE ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.get_image_processor() __snake_case : str = self.get_tokenizer() __snake_case : str = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE ) __snake_case : List[Any] = '''lower newer''' __snake_case : Union[str, Any] = self.prepare_image_inputs() __snake_case : Optional[Any] = processor(text=__SCREAMING_SNAKE_CASE 
, images=__SCREAMING_SNAKE_CASE ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__SCREAMING_SNAKE_CASE ): processor() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = self.get_image_processor() __snake_case : str = self.get_tokenizer() __snake_case : int = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE ) __snake_case : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __snake_case : Any = processor.batch_decode(__SCREAMING_SNAKE_CASE ) __snake_case : Optional[int] = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = self.get_image_processor() __snake_case : Any = self.get_tokenizer() __snake_case : Dict = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE ) __snake_case : int = '''lower newer''' __snake_case : Tuple = self.prepare_image_inputs() __snake_case : Any = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
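# A minimal end-to-end sketch of the processor pattern tested above, using the
# published CLIP checkpoint instead of the temp-dir fixtures (checkpoint id is
# the standard public one, not taken from the source):
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
inputs = processor(text=["lower newer"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']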
351
"""simple docstring""" def lowercase ( _snake_case : int = 100 ) ->int: """simple docstring""" __snake_case : str = n * (n + 1) * (2 * n + 1) / 6 __snake_case : Dict = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F'{solution() = }')
24
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE : List[str] = { """vocab_file""": { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt""" ), """google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""", """google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""", """google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""", """google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""", }, """tokenizer_file""": { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json""" ), """google/realm-orqa-nq-openqa""": ( """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json""" ), """google/realm-orqa-nq-reader""": ( """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json""" ), """google/realm-orqa-wq-openqa""": ( """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json""" ), """google/realm-orqa-wq-reader""": ( """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json""" ), }, } SCREAMING_SNAKE_CASE : Any = { """google/realm-cc-news-pretrained-embedder""": 512, """google/realm-cc-news-pretrained-encoder""": 512, """google/realm-cc-news-pretrained-scorer""": 512, """google/realm-cc-news-pretrained-openqa""": 512, """google/realm-orqa-nq-openqa""": 512, """google/realm-orqa-nq-reader""": 512, """google/realm-orqa-wq-openqa""": 512, """google/realm-orqa-wq-reader""": 512, } SCREAMING_SNAKE_CASE : Optional[Any] = { """google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-nq-openqa""": {"""do_lower_case""": True}, 
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True}, """google/realm-orqa-wq-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-wq-reader""": {"""do_lower_case""": True}, } class _UpperCAmelCase ( snake_case_ ): '''simple docstring''' lowerCamelCase__ =VOCAB_FILES_NAMES lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ =PRETRAINED_INIT_CONFIGURATION lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ =RealmTokenizer def __init__(self , a_=None , a_=None , a_=True , a_="[UNK]" , a_="[SEP]" , a_="[PAD]" , a_="[CLS]" , a_="[MASK]" , a_=True , a_=None , **a_ , ): '''simple docstring''' super().__init__( a_ , tokenizer_file=a_ , do_lower_case=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , tokenize_chinese_chars=a_ , strip_accents=a_ , **a_ , ) __snake_case : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , a_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , a_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , a_ ) != tokenize_chinese_chars ): __snake_case : Optional[Any] = getattr(a_ , normalizer_state.pop('''type''' ) ) __snake_case : Optional[int] = do_lower_case __snake_case : Optional[int] = strip_accents __snake_case : int = tokenize_chinese_chars __snake_case : Dict = normalizer_class(**a_ ) __snake_case : Optional[int] = do_lower_case def SCREAMING_SNAKE_CASE (self , a_ , **a_ ): '''simple docstring''' __snake_case : List[str] = PaddingStrategy.MAX_LENGTH __snake_case : str = text __snake_case : Dict = kwargs.pop('''text_pair''' , a_ ) __snake_case : Optional[Any] = kwargs.pop('''return_tensors''' , a_ ) __snake_case : Optional[int] = { '''input_ids''': [], '''attention_mask''': [], '''token_type_ids''': [], } for idx, candidate_text in enumerate(a_ ): if batch_text_pair is not None: __snake_case : Dict = batch_text_pair[idx] else: __snake_case : Dict = None __snake_case : Tuple = super().__call__(a_ , a_ , return_tensors=a_ , **a_ ) __snake_case : int = encoded_candidates.get('''input_ids''' ) __snake_case : Dict = encoded_candidates.get('''attention_mask''' ) __snake_case : Any = encoded_candidates.get('''token_type_ids''' ) if encoded_input_ids is not None: output_data["input_ids"].append(a_ ) if encoded_attention_mask is not None: output_data["attention_mask"].append(a_ ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(a_ ) __snake_case : Any = {key: item for key, item in output_data.items() if len(a_ ) != 0} return BatchEncoding(a_ , tensor_type=a_ ) def SCREAMING_SNAKE_CASE (self , a_ , a_=None ): '''simple docstring''' __snake_case : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' __snake_case : Union[str, Any] = [self.sep_token_id] __snake_case : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' __snake_case : int = self._tokenizer.model.save(a_ , name=a_ ) return tuple(a_ )
352
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__) @dataclass class _UpperCAmelCase ( datasets.BuilderConfig ): '''simple docstring''' lowerCamelCase__ =10000 lowerCamelCase__ =None lowerCamelCase__ =None class _UpperCAmelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' lowerCamelCase__ =ParquetConfig def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) __snake_case : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(a_ , (str, list, tuple) ): __snake_case : Union[str, Any] = data_files if isinstance(a_ , a_ ): __snake_case : Union[str, Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : List[Any] = [dl_manager.iter_files(a_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __snake_case : int = [] for split_name, files in data_files.items(): if isinstance(a_ , a_ ): __snake_case : List[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : int = [dl_manager.iter_files(a_ ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(a_ ): with open(a_ , '''rb''' ) as f: __snake_case : Any = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) ) break splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) ) return splits def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __snake_case : List[Any] = table_cast(a_ , self.info.features.arrow_schema ) return pa_table def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ): with open(a_ , '''rb''' ) as f: __snake_case : int = pq.ParquetFile(a_ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __snake_case : Dict = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"""{file_idx}_{batch_idx}""", self._cast_table(a_ ) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(a_ )}: {e}""" ) raise
24
0
"""simple docstring""" import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline SCREAMING_SNAKE_CASE : int = { """n_samples""": 64, """horizon""": 32, """num_inference_steps""": 20, """n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network """scale_grad_by_std""": True, """scale""": 0.1, """eta""": 0.0, """t_grad_cutoff""": 2, """device""": """cpu""", } if __name__ == "__main__": SCREAMING_SNAKE_CASE : Union[str, Any] = """hopper-medium-v2""" SCREAMING_SNAKE_CASE : int = gym.make(env_name) SCREAMING_SNAKE_CASE : Any = ValueGuidedRLPipeline.from_pretrained( """bglick13/hopper-medium-v2-value-function-hor32""", env=env, ) env.seed(0) SCREAMING_SNAKE_CASE : Any = env.reset() SCREAMING_SNAKE_CASE : Union[str, Any] = 0 SCREAMING_SNAKE_CASE : Any = 0 SCREAMING_SNAKE_CASE : List[str] = 1000 SCREAMING_SNAKE_CASE : Tuple = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy SCREAMING_SNAKE_CASE : int = pipeline(obs, planning_horizon=32) # execute action in environment SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = env.step(denorm_actions) SCREAMING_SNAKE_CASE : Dict = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( F'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:' F' {total_score}' ) # save observations for rendering rollout.append(next_observation.copy()) SCREAMING_SNAKE_CASE : Optional[Any] = next_observation except KeyboardInterrupt: pass print(F'Total reward: {total_reward}')
353
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): __snake_case : Dict = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = '''sshleifer/tiny-gpt2''' __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = '''sgugger/tiny-distilbert-classification''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , ) __snake_case : Optional[Any] = TensorFlowBenchmark(a_ ) __snake_case : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Any = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Union[str, Any] = AutoConfig.from_pretrained(a_ ) __snake_case : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = '''sshleifer/tiny-gpt2''' __snake_case : Optional[Any] = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Dict = TensorFlowBenchmark(a_ , [config] ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = 
TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : int = TensorFlowBenchmark(a_ ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Dict = AutoConfig.from_pretrained(a_ ) __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''patrickvonplaten/t5-tiny-random''' __snake_case : Tuple = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , configs=[config] ) __snake_case : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(a_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(a_ , '''env.csv''' ) , multi_process=a_ , ) __snake_case : Union[str, Any] = TensorFlowBenchmark(a_ ) benchmark.run() self.assertTrue(Path(os.path.join(a_ , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''env.csv''' ) ).exists() ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(a_ ): self.assertTrue(hasattr(a_ , '''sequential''' ) ) self.assertTrue(hasattr(a_ , '''cumulative''' ) ) self.assertTrue(hasattr(a_ , '''current''' ) ) self.assertTrue(hasattr(a_ , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , '''log.txt''' ) , log_print=a_ , 
trace_memory_line_by_line=a_ , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ ) __snake_case : Optional[int] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(a_ , '''log.txt''' ) ).exists() )
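# A usage sketch of the benchmark utilities exercised above (not part of the test
# suite): build `TensorFlowBenchmarkArguments`, wrap them in `TensorFlowBenchmark`,
# and inspect the result containers that `check_results_dict_not_empty` walks.
if __name__ == "__main__":
    args = TensorFlowBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    results = TensorFlowBenchmark(args).run()
    print(results.time_inference_result)    # {model_id: {"bs": [...], "ss": [...], "result": {...}}}
    print(results.memory_inference_result)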
"""simple docstring""" import math import random from typing import Any from .hill_climbing import SearchProblem def lowercase ( _snake_case : Optional[int] , _snake_case : bool = True , _snake_case : float = math.inf , _snake_case : float = -math.inf , _snake_case : float = math.inf , _snake_case : float = -math.inf , _snake_case : bool = False , _snake_case : float = 100 , _snake_case : float = 0.01 , _snake_case : float = 1 , ) ->Any: """simple docstring""" __snake_case : Optional[Any] = False __snake_case : Optional[Any] = search_prob __snake_case : List[str] = start_temperate __snake_case : Any = [] __snake_case : int = 0 __snake_case : List[str] = None while not search_end: __snake_case : List[Any] = current_state.score() if best_state is None or current_score > best_state.score(): __snake_case : Dict = current_state scores.append(_A ) iterations += 1 __snake_case : str = None __snake_case : Dict = current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to __snake_case : List[Any] = random.randint(0 , len(_A ) - 1 ) # picking a random neighbor __snake_case : Optional[Any] = neighbors.pop(_A ) __snake_case : Optional[int] = picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: __snake_case : str = change * -1 # in case we are finding minimum if change > 0: # improves the solution __snake_case : Optional[int] = picked_neighbor else: __snake_case : Tuple = (math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability __snake_case : Optional[int] = picked_neighbor __snake_case : int = current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor __snake_case : List[str] = True else: __snake_case : Optional[Any] = next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(_A ) , _A ) plt.xlabel('''Iterations''' ) plt.ylabel('''Function values''' ) plt.show() return best_state if __name__ == "__main__": def lowercase ( _snake_case : Dict , _snake_case : Optional[int] ) ->Tuple: """simple docstring""" return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) SCREAMING_SNAKE_CASE : Dict = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) SCREAMING_SNAKE_CASE : int = simulated_annealing( prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """ F'and 50 > y > - 5 found via hill climbing: {local_min.score()}' ) # starting the problem with initial coordinates (12, 47) SCREAMING_SNAKE_CASE : int = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) SCREAMING_SNAKE_CASE : List[Any] = simulated_annealing( prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """ F'and 50 > y > - 5 found via hill climbing: {local_min.score()}' ) def lowercase ( _snake_case : List[str] , _snake_case : Dict ) ->Tuple: """simple docstring""" return (3 * x**2) - (6 * y) SCREAMING_SNAKE_CASE : Optional[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) SCREAMING_SNAKE_CASE : Union[str, Any] = 
simulated_annealing(prob, find_max=False, visualization=True) print( """The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """ F'{local_min.score()}' ) SCREAMING_SNAKE_CASE : str = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) SCREAMING_SNAKE_CASE : Optional[Any] = simulated_annealing(prob, find_max=True, visualization=True) print( """The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """ F'{local_min.score()}' )
"""simple docstring""" import logging import os import threading import time try: import warnings except ImportError: SCREAMING_SNAKE_CASE : Tuple = None try: import msvcrt except ImportError: SCREAMING_SNAKE_CASE : List[str] = None try: import fcntl except ImportError: SCREAMING_SNAKE_CASE : Tuple = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: SCREAMING_SNAKE_CASE : List[str] = OSError # Data # ------------------------------------------------ SCREAMING_SNAKE_CASE : List[Any] = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] SCREAMING_SNAKE_CASE : List[Any] = """3.0.12""" SCREAMING_SNAKE_CASE : int = None def lowercase ( ) ->str: """simple docstring""" global _logger __snake_case : Union[str, Any] = _logger or logging.getLogger(__name__ ) return _logger class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[int] = lock_file return None def __str__(self ): '''simple docstring''' __snake_case : Tuple = f"""The file lock '{self.lock_file}' could not be acquired.""" return temp class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[Any] = lock return None def __enter__(self ): '''simple docstring''' return self.lock def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.lock.release() return None class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : List[Any] = max_filename_length if max_filename_length is not None else 2_55 # Hash the filename if it's too long __snake_case : Dict = self.hash_filename_if_too_long(a_ , a_ ) # The path to the lock file. __snake_case : str = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __snake_case : Dict = None # The default timeout value. __snake_case : List[Any] = timeout # We use this lock primarily for the lock counter. __snake_case : Tuple = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __snake_case : Optional[Any] = 0 return None @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._timeout @timeout.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Dict = float(a_ ) return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file_fd is not None def SCREAMING_SNAKE_CASE (self , a_=None , a_=0.05 ): '''simple docstring''' if timeout is None: __snake_case : List[str] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __snake_case : Optional[int] = id(self ) __snake_case : str = self._lock_file __snake_case : Optional[int] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(a_ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __snake_case : Optional[int] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def SCREAMING_SNAKE_CASE (self , a_=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __snake_case : Tuple = id(self ) __snake_case : str = self._lock_file logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __snake_case : Dict = 0 logger().debug(f"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__(self ): '''simple docstring''' self.acquire() return self def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.release() return None def __del__(self ): '''simple docstring''' self.release(force=a_ ) return None def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : Any = os.path.basename(a_ ) if len(a_ ) > max_length and max_length > 0: __snake_case : List[Any] = os.path.dirname(a_ ) __snake_case : Any = str(hash(a_ ) ) __snake_case : List[Any] = filename[: max_length - len(a_ ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(a_ , a_ ) else: return path class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) __snake_case : List[str] = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __snake_case : Any = os.open(self._lock_file , a_ ) except OSError: pass else: try: msvcrt.locking(a_ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(a_ ) else: __snake_case : Dict = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Dict = None msvcrt.locking(a_ , msvcrt.LK_UNLCK , 1 ) os.close(a_ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : Optional[Any] = os.statvfs(os.path.dirname(a_ ) ).f_namemax super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC __snake_case : List[str] = os.open(self._lock_file , a_ ) try: fcntl.flock(a_ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(a_ ) else: __snake_case : Optional[int] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Tuple = None fcntl.flock(a_ , fcntl.LOCK_UN ) os.close(a_ ) return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __snake_case : Tuple = os.open(self._lock_file , a_ ) except OSError: pass else: __snake_case : List[Any] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' os.close(self._lock_file_fd ) __snake_case : int = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None SCREAMING_SNAKE_CASE : Dict = None if msvcrt: SCREAMING_SNAKE_CASE : List[Any] = WindowsFileLock elif fcntl: SCREAMING_SNAKE_CASE : List[str] = UnixFileLock else: SCREAMING_SNAKE_CASE : str = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
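# Usage sketch, assuming the public names advertised in the module's name list
# above ("FileLock", "Timeout"). "example.lock" is an arbitrary path; a second
# process running the same code would block, then raise Timeout after 5 seconds.
if __name__ == "__main__":
    lock = FileLock("example.lock", timeout=5)
    try:
        with lock:
            pass  # critical section: at most one process at a time
    except Timeout:
        print("could not acquire example.lock within 5 seconds")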
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(
    resistance: float, reactance: float, impedance: float
) -> dict[str, float]:
    """
    Compute the missing one of resistance, reactance, or impedance of an AC
    circuit from the relation Z^2 = R^2 + X^2. Exactly one of the three
    arguments must be 0; that quantity is computed and returned in a dict.

    >>> electrical_impedance(3, 4, 0)
    {'impedance': 5.0}
    >>> electrical_impedance(0, 4, 5)
    {'resistance': 3.0}
    >>> electrical_impedance(3, 0, 5)
    {'reactance': 4.0}
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=24 , a_=2 , a_=6 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=None , a_=10_00 , ): '''simple docstring''' __snake_case : Any = parent __snake_case : int = batch_size __snake_case : Dict = seq_length __snake_case : List[str] = is_training __snake_case : List[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : Union[str, Any] = use_labels __snake_case : str = vocab_size __snake_case : int = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : int = num_attention_heads __snake_case : str = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : int = hidden_dropout_prob __snake_case : Union[str, Any] = attention_probs_dropout_prob __snake_case : List[Any] = max_position_embeddings __snake_case : Any = type_vocab_size __snake_case : Dict = type_sequence_label_size __snake_case : Optional[Any] = initializer_range __snake_case : Union[str, Any] = num_labels __snake_case : Any = scope __snake_case : Any = range_bbox def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : List[str] = bbox[i, j, 3] __snake_case : Any = bbox[i, j, 1] __snake_case : Tuple = t if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : List[str] = bbox[i, j, 2] __snake_case : Union[str, Any] = bbox[i, j, 0] __snake_case : Dict = t __snake_case : Optional[int] = None if self.use_input_mask: __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __snake_case : Dict = None if self.use_token_type_ids: __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : List[str] = None __snake_case : Union[str, Any] = None if self.use_labels: __snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : Any = model(a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ ) __snake_case : str = model(a_ , bbox=a_ , token_type_ids=a_ ) __snake_case : List[str] = model(a_ , bbox=a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[int] = self.num_labels __snake_case : List[str] = LiltForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Tuple = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[Any] = LiltForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Dict = config_and_inputs __snake_case : Any = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ =( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =False lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' return True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModelTester(self ) __snake_case : Optional[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Dict = type 
self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = LiltModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @require_torch @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a_ ) __snake_case : Dict = torch.tensor([[1, 2]] , device=a_ ) __snake_case : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a_ ) # forward pass with torch.no_grad(): __snake_case : Union[str, Any] = model(input_ids=a_ , bbox=a_ ) __snake_case : Union[str, Any] = torch.Size([1, 2, 7_68] ) __snake_case : str = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=a_ , ) self.assertTrue(outputs.last_hidden_state.shape , a_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a_ , atol=1E-3 ) )
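# For reference, the per-element bbox clean-up in `prepare_config_and_inputs`
# above can be written as one vectorized step; this equivalent sketch (not used
# by the tests) makes the invariant explicit: each box is (x0, y0, x1, y1) with
# x0 <= x1 and y0 <= y1.
def sort_box_corners(bbox):
    """bbox: LongTensor of shape (batch, seq_len, 4)."""
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)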
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Dict = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, "constant": get_constant_schedule, "constant_w_warmup": get_constant_schedule_with_warmup, } class _UpperCAmelCase ( __lowerCAmelCase ): '''simple docstring''' def __init__(self , a_=None , a_=None , *a_ , **a_ ): '''simple docstring''' super().__init__(*lowerCamelCase__ , **lowerCamelCase__ ) if config is None: assert isinstance(self.model , lowerCamelCase__ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f""" {self.model.__class__}""" ) __snake_case : Union[str, Any] = self.model.config else: __snake_case : List[Any] = config __snake_case : Dict = data_args __snake_case : str = self.config.tgt_vocab_size if isinstance(self.config , lowerCamelCase__ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f"""The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for""" ''' padding..''' ) if self.args.label_smoothing == 0: __snake_case : Optional[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss __snake_case : Optional[Any] = label_smoothed_nll_loss def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if self.optimizer is None: __snake_case : List[str] = ['''bias''', '''LayerNorm.weight'''] __snake_case : Optional[Any] = [ { '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], '''weight_decay''': self.args.weight_decay, }, { '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] __snake_case : Optional[int] = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: __snake_case : int = Adafactor __snake_case : Union[str, Any] = {'''scale_parameter''': False, '''relative_step''': False} else: __snake_case : Tuple = AdamW __snake_case : List[Any] = { '''betas''': (self.args.adam_betaa, self.args.adam_betaa), '''eps''': self.args.adam_epsilon, } __snake_case : str = self.args.learning_rate if self.sharded_ddp: __snake_case : str = OSS( params=lowerCamelCase__ , optim=lowerCamelCase__ , **lowerCamelCase__ , ) else: __snake_case : int = optimizer_cls(lowerCamelCase__ , **lowerCamelCase__ ) if self.lr_scheduler is None: __snake_case : Optional[int] = self._get_lr_scheduler(lowerCamelCase__ ) else: # ignoring --lr_scheduler logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": __snake_case : str = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": __snake_case : str = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: __snake_case : str = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=lowerCamelCase__ ) return scheduler def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token __snake_case : Any = model(**lowerCamelCase__ , use_cache=lowerCamelCase__ )[0] __snake_case : int = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models __snake_case : List[Any] = model(**lowerCamelCase__ , labels=lowerCamelCase__ , use_cache=lowerCamelCase__ )[:2] else: # compute label smoothed loss __snake_case : Optional[Any] = model(**lowerCamelCase__ , use_cache=lowerCamelCase__ )[0] __snake_case : List[Any] = torch.nn.functional.log_softmax(lowerCamelCase__ , dim=-1 ) __snake_case : Dict = self.loss_fn(lowerCamelCase__ , 
lowerCamelCase__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : Tuple = inputs.pop('''labels''' ) __snake_case : Tuple = self._compute_loss(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) return loss def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ = None , ): '''simple docstring''' __snake_case : Dict = self._prepare_inputs(lowerCamelCase__ ) __snake_case : Dict = { '''max_length''': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: __snake_case : Tuple = self.model.generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **lowerCamelCase__ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: __snake_case : Any = self._pad_tensors_to_max_len(lowerCamelCase__ , gen_kwargs['''max_length'''] ) __snake_case : Optional[Any] = inputs.pop('''labels''' ) with torch.no_grad(): # compute loss on predict data __snake_case : str = self._compute_loss(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __snake_case : Union[str, Any] = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) __snake_case : Tuple = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: __snake_case : int = self._pad_tensors_to_max_len(lowerCamelCase__ , gen_kwargs['''max_length'''] ) return (loss, logits, labels) def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : Optional[int] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be''' f""" padded to `max_length`={max_length}""" ) __snake_case : List[str] = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) __snake_case : Optional[Any] = tensor return padded_tensor
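# `label_smoothed_nll_loss` is imported from `utils` above but not shown in this
# file. A sketch of the usual implementation from the transformers examples,
# with the signature inferred from the call site above (treat details as
# assumptions; it typically returns (loss, nll_loss) and the trainer keeps the
# first element):
import torch


def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    """lprobs: (..., vocab) log-probabilities; target: (...) token ids."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss = nll_loss.sum()
    smooth_loss = smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss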
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ): '''simple docstring''' __snake_case : List[Any] = parent __snake_case : List[Any] = batch_size __snake_case : str = seq_length __snake_case : Any = is_training __snake_case : Any = use_input_mask __snake_case : str = use_token_type_ids __snake_case : Dict = use_labels __snake_case : int = vocab_size __snake_case : Union[str, Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : str = num_attention_heads __snake_case : Optional[int] = intermediate_size __snake_case : str = hidden_act __snake_case : Union[str, Any] = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : str = max_position_embeddings __snake_case : Dict = type_vocab_size __snake_case : List[Any] = type_sequence_label_size __snake_case : Union[str, Any] = initializer_range __snake_case : str = num_labels __snake_case : Dict = num_choices __snake_case : Optional[int] = scope def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Dict = None if self.use_input_mask: __snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Tuple = None __snake_case : List[str] = None __snake_case : Dict = None if self.use_labels: __snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[str] = DistilBertModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model(a_ , a_ ) __snake_case : List[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple 
docstring''' __snake_case : Optional[Any] = DistilBertForMaskedLM(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Tuple = DistilBertForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : Optional[Any] = model( a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Any = self.num_labels __snake_case : Optional[int] = DistilBertForSequenceClassification(a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = self.num_labels __snake_case : Optional[int] = DistilBertForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Dict = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = self.num_choices __snake_case : Any = DistilBertForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : Optional[int] = model( a_ , attention_mask=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs __snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCamelCase__ =( { 'feature-extraction': DistilBertModel, 'fill-mask': DistilBertForMaskedLM, 'question-answering': DistilBertForQuestionAnswering, 'text-classification': DistilBertForSequenceClassification, 'token-classification': DistilBertForTokenClassification, 'zero-shot': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = DistilBertModelTester(self ) __snake_case : List[str] = ConfigTester(self , config_class=a_ , dim=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def 
SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Tuple = DistilBertModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return __snake_case : List[str] = True __snake_case : Tuple = model_class(config=a_ ) __snake_case : Any = self._prepare_for_class(a_ , a_ ) __snake_case : Dict = torch.jit.trace( a_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a_ , os.path.join(a_ , '''traced_model.pt''' ) ) __snake_case : int = torch.jit.load(os.path.join(a_ , '''traced_model.pt''' ) , map_location=a_ ) loaded(inputs_dict['''input_ids'''].to(a_ ) , inputs_dict['''attention_mask'''].to(a_ ) ) @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __snake_case : List[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __snake_case : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __snake_case : List[Any] = model(a_ , attention_mask=a_ )[0] __snake_case : Tuple = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , a_ ) __snake_case : Optional[int] = torch.tensor( [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
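# `ids_tensor` and `random_attention_mask` come from `test_modeling_common`; for
# reference, a minimal equivalent of `ids_tensor` looks like this (a sketch, not
# the imported implementation):
def _ids_tensor(shape, vocab_size):
    """Random LongTensor of token ids with the given shape, values in [0, vocab_size)."""
    return torch.randint(low=0, high=vocab_size, size=tuple(shape), dtype=torch.long)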
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : Optional[Any] = { """configuration_instructblip""": [ """INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """InstructBlipConfig""", """InstructBlipQFormerConfig""", """InstructBlipVisionConfig""", ], """processing_instructblip""": ["""InstructBlipProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : List[str] = [ """INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """InstructBlipQFormerModel""", """InstructBlipPreTrainedModel""", """InstructBlipForConditionalGeneration""", """InstructBlipVisionModel""", ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase ( _snake_case : str , _snake_case : str , _snake_case : str ) ->List[Any]: """simple docstring""" def get_masked_lm_array(_snake_case : str ): __snake_case : int = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : str = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Any = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_array(_snake_case : str ): __snake_case : List[str] = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Union[str, Any] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_layer_array(_snake_case : int , _snake_case : str ): __snake_case : str = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Optional[int] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[Any] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_attention_layer_array(_snake_case : int , _snake_case : str , _snake_case : str ): __snake_case : Any = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Dict = tf.train.load_variable(_snake_case , _snake_case ) __snake_case : int = array.reshape(_snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) print(f"""Loading model based on config from {config_path}...""" ) __snake_case : Optional[Any] = BertConfig.from_json_file(_snake_case ) __snake_case : Dict = BertForMaskedLM(_snake_case ) # Layers for layer_index in range(0 , config.num_hidden_layers ): __snake_case : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention __snake_case : BertSelfAttention = layer.attention.self __snake_case : int = get_encoder_attention_layer_array( _snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape ) __snake_case : List[Any] = get_encoder_attention_layer_array( _snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape ) __snake_case : Union[str, Any] = get_encoder_attention_layer_array( _snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape ) # Self-attention Output __snake_case : BertSelfOutput = layer.attention.output __snake_case : Dict = get_encoder_attention_layer_array( _snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape ) __snake_case : str = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/gamma''' ) __snake_case : Any = 
get_encoder_layer_array(_snake_case , '''_attention_layer_norm/beta''' ) # Intermediate __snake_case : BertIntermediate = layer.intermediate __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/kernel''' ) __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/bias''' ) # Output __snake_case : BertOutput = layer.output __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_dense/kernel''' ) __snake_case : Dict = get_encoder_layer_array(_snake_case , '''_output_dense/bias''' ) __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/gamma''' ) __snake_case : Union[str, Any] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/beta''' ) # Embeddings __snake_case : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' ) __snake_case : str = get_encoder_array('''_type_embedding_layer/embeddings''' ) __snake_case : int = get_encoder_array('''_embedding_norm_layer/gamma''' ) __snake_case : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' ) # LM Head __snake_case : Optional[Any] = model.cls.predictions.transform __snake_case : Dict = get_masked_lm_array('''dense/kernel''' ) __snake_case : Union[str, Any] = get_masked_lm_array('''dense/bias''' ) __snake_case : str = get_masked_lm_array('''layer_norm/gamma''' ) __snake_case : Tuple = get_masked_lm_array('''layer_norm/beta''' ) __snake_case : Tuple = get_masked_lm_array('''embedding_table''' ) # Pooling __snake_case : Optional[Any] = BertPooler(config=_snake_case ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/kernel''' ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/bias''' ) # Export final model model.save_pretrained(_snake_case ) # Integration test - should load without any errors ;) __snake_case : Dict = BertForMaskedLM.from_pretrained(_snake_case ) print(new_model.eval() ) print('''Model conversion was done sucessfully!''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() parser.add_argument( """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path.""" ) parser.add_argument( """--bert_config_file""", type=str, required=True, help="""The config json file corresponding to the BERT model. This specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", type=str, required=True, help="""Path to the output PyTorch model.""", ) SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
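# Example invocation of the conversion script above (all paths are placeholders;
# the script name is illustrative -- use whatever this file is saved as):
#
#   python convert_token_dropping_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/tf2/checkpoint \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/output_dir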
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): SCREAMING_SNAKE_CASE : Optional[Any] = 'pt' elif is_tf_available(): SCREAMING_SNAKE_CASE : List[str] = 'tf' else: SCREAMING_SNAKE_CASE : List[Any] = 'jax' class _UpperCAmelCase ( SCREAMING_SNAKE_CASE__, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =PerceiverTokenizer lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' super().setUp() __snake_case : Any = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' ) def SCREAMING_SNAKE_CASE (self , **a_ ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **a_ ) def SCREAMING_SNAKE_CASE (self , a_ , a_=False , a_=20 , a_=5 ): '''simple docstring''' __snake_case : Dict = [] for i in range(len(a_ ) ): try: __snake_case : Optional[int] = tokenizer.decode([i] , clean_up_tokenization_spaces=a_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) __snake_case : str = list(filter(lambda a_ : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , a_ ) ) __snake_case : Dict = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=a_ ) , a_ ) ) if max_length is not None and len(a_ ) > max_length: __snake_case : List[str] = toks[:max_length] if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0: while len(a_ ) < min_length: __snake_case : int = toks + toks # toks_str = [t[1] for t in toks] __snake_case : Optional[int] = [t[0] for t in toks] # Ensure consistency __snake_case : Optional[Any] = tokenizer.decode(a_ , clean_up_tokenization_spaces=a_ ) if " " not in output_txt and len(a_ ) > 1: __snake_case : Optional[int] = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=a_ ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=a_ ) ) if with_prefix_space: __snake_case : Any = ' ' + output_txt __snake_case : List[Any] = tokenizer.encode(a_ , add_special_tokens=a_ ) return output_txt, output_ids def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.perceiver_tokenizer __snake_case : Optional[Any] = 'Unicode €.' 
__snake_case : List[str] = tokenizer(a_ ) __snake_case : Tuple = [4, 91, 1_16, 1_11, 1_05, 1_17, 1_06, 1_07, 38, 2_32, 1_36, 1_78, 52, 5] self.assertEqual(encoded['''input_ids'''] , a_ ) # decoding __snake_case : Optional[int] = tokenizer.decode(a_ ) self.assertEqual(a_ , '''[CLS]Unicode €.[SEP]''' ) __snake_case : Optional[int] = tokenizer('''e è é ê ë''' ) __snake_case : Optional[int] = [4, 1_07, 38, 2_01, 1_74, 38, 2_01, 1_75, 38, 2_01, 1_76, 38, 2_01, 1_77, 5] self.assertEqual(encoded['''input_ids'''] , a_ ) # decoding __snake_case : Dict = tokenizer.decode(a_ ) self.assertEqual(a_ , '''[CLS]e è é ê ë[SEP]''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.perceiver_tokenizer __snake_case : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off __snake_case : int = [4, 71, 38, 1_14, 1_17, 1_16, 1_09, 38, 1_18, 1_03, 1_20, 1_03, 1_09, 1_20, 1_03, 1_18, 1_10, 38, 1_08, 1_17, 1_20, 38, 1_21, 1_23, 1_15, 1_15, 1_03, 1_20, 1_11, 1_28, 1_03, 1_22, 1_11, 1_17, 1_16, 52, 5, 0] # fmt: on __snake_case : Dict = tokenizer(a_ , padding=a_ , return_tensors=a_ ) self.assertIsInstance(a_ , a_ ) if FRAMEWORK != "jax": __snake_case : List[Any] = list(batch.input_ids.numpy()[0] ) else: __snake_case : Optional[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(a_ , a_ ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = self.perceiver_tokenizer __snake_case : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __snake_case : int = tokenizer(a_ , padding=a_ , return_tensors=a_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''' , a_ ) self.assertIn('''attention_mask''' , a_ ) self.assertNotIn('''decoder_input_ids''' , a_ ) self.assertNotIn('''decoder_attention_mask''' , a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.perceiver_tokenizer __snake_case : Union[str, Any] = [ 'Summary of the text.', 'Another summary.', ] __snake_case : Optional[Any] = tokenizer( text_target=a_ , max_length=32 , padding='''max_length''' , truncation=a_ , return_tensors=a_ ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : int = tempfile.mkdtemp() __snake_case : str = ' He is very happy, UNwant\u00E9d,running' __snake_case : List[str] = tokenizer.encode(a_ , add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) __snake_case : Any = tokenizer.__class__.from_pretrained(a_ ) __snake_case : Optional[int] = after_tokenizer.encode(a_ , add_special_tokens=a_ ) self.assertListEqual(a_ , a_ ) shutil.rmtree(a_ ) __snake_case : Any = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with 
self.subTest(f"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : List[Any] = tempfile.mkdtemp() __snake_case : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['''bim''', '''bambam'''] ) __snake_case : Optional[int] = tokenizer.additional_special_tokens additional_special_tokens.append('''new_additional_special_token''' ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) __snake_case : Optional[Any] = tokenizer.encode(a_ , add_special_tokens=a_ ) tokenizer.save_pretrained(a_ ) __snake_case : Any = tokenizer.__class__.from_pretrained(a_ ) __snake_case : Optional[int] = after_tokenizer.encode(a_ , add_special_tokens=a_ ) self.assertListEqual(a_ , a_ ) self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __snake_case : Union[str, Any] = tokenizer.__class__.from_pretrained(a_ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(a_ ) with open(os.path.join(a_ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: __snake_case : Optional[Any] = json.load(a_ ) with open(os.path.join(a_ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: __snake_case : Any = json.load(a_ ) __snake_case : str = [f"""<extra_id_{i}>""" for i in range(1_25 )] __snake_case : Optional[int] = added_tokens_extra_ids + [ 'an_additional_special_token' ] __snake_case : str = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(a_ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(a_ , a_ ) with open(os.path.join(a_ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(a_ , a_ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __snake_case : Any = tokenizer_class.from_pretrained( a_ , ) self.assertIn( '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __snake_case : Tuple = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=a_ )] __snake_case : Tuple = tokenizer_class.from_pretrained( a_ , additional_special_tokens=a_ , ) self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens ) self.assertEqual( ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([1_78] ) , '''�''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.get_tokenizers(fast=a_ , do_lower_case=a_ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __snake_case : str = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]'] __snake_case : Any = tokenizer.convert_tokens_to_string(a_ ) self.assertIsInstance(a_ , a_ )
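# The id sequences asserted above follow the Perceiver tokenizer's byte-level
# scheme: ids 0-5 are the special tokens ([PAD], [BOS], [EOS], [MASK], [CLS],
# [SEP]) and every UTF-8 byte b maps to id b + 6. Quick check against the
# "Unicode €." case from the first test:
SPECIAL_TOKEN_OFFSET = 6
ids = [4] + [b + SPECIAL_TOKEN_OFFSET for b in "Unicode €.".encode("utf-8")] + [5]
assert ids == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]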
358
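The tokenizer round-trip test above hinges on one behavior: tokens and special tokens added before save_pretrained must survive from_pretrained. Below is a minimal sketch of that cycle, assuming the transformers library is installed and the bert-base-uncased checkpoint is reachable (any checkpoint would do).

import tempfile

from transformers import AutoTokenizer  # assumes transformers is installed

with tempfile.TemporaryDirectory() as tmpdir:
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

    # Register plain tokens and an extra special token before saving.
    tokenizer.add_tokens(["bim", "bambam"])
    extra = tokenizer.additional_special_tokens + ["new_additional_special_token"]
    tokenizer.add_special_tokens({"additional_special_tokens": extra})

    before_ids = tokenizer.encode(" He is very happy, UNwant\u00e9d,running",
                                  add_special_tokens=False)
    tokenizer.save_pretrained(tmpdir)

    # Reloading must reproduce the same ids and keep the new special token.
    reloaded = AutoTokenizer.from_pretrained(tmpdir)
    after_ids = reloaded.encode(" He is very happy, UNwant\u00e9d,running",
                                add_special_tokens=False)
    assert before_ids == after_ids
    assert "new_additional_special_token" in reloaded.additional_special_tokens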
"""simple docstring""" import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_ , a_ = None , a_ = None , a_ = False , **a_ , ): '''simple docstring''' super().__init__(features=a_ , cache_dir=a_ , keep_in_memory=a_ , **a_ ) __snake_case : Union[str, Any] = Sql( cache_dir=a_ , features=a_ , sql=a_ , con=a_ , **a_ , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = None __snake_case : Dict = None __snake_case : Dict = None __snake_case : List[str] = None self.builder.download_and_prepare( download_config=a_ , download_mode=a_ , verification_mode=a_ , base_path=a_ , ) # Build dataset for splits __snake_case : Any = self.builder.as_dataset( split='''train''' , verification_mode=a_ , in_memory=self.keep_in_memory ) return dataset class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_ , a_ , a_ = None , a_ = None , **a_ , ): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" ) __snake_case : List[str] = dataset __snake_case : Tuple = name __snake_case : Optional[int] = con __snake_case : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __snake_case : Dict = num_proc __snake_case : Dict = to_sql_kwargs def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.to_sql_kwargs.pop('''sql''' , a_ ) __snake_case : Union[str, Any] = self.to_sql_kwargs.pop('''con''' , a_ ) __snake_case : Any = self.to_sql_kwargs.pop('''index''' , a_ ) __snake_case : Optional[Any] = self._write(index=a_ , **self.to_sql_kwargs ) return written def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case , __snake_case , __snake_case : Optional[Any] = args __snake_case : List[Any] = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs __snake_case : Dict = query_table( table=self.dataset.data , key=slice(a_ , offset + self.batch_size ) , indices=self.dataset._indices , ) __snake_case : Tuple = batch.to_pandas() __snake_case : str = df.to_sql(self.name , self.con , index=a_ , **a_ ) return num_rows or len(a_ ) def SCREAMING_SNAKE_CASE (self , a_ , **a_ ): '''simple docstring''' __snake_case : int = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: __snake_case , __snake_case : Union[str, Any] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a_ , a_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += num_rows return written
24
0
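The SQL writer above slices the Arrow table into batches and flips pandas' if_exists flag to "append" after the first batch. Here is a self-contained sketch of that batching contract using only sqlite3 and pandas; the table name "demo" and the toy rows are illustrative.

import sqlite3

import pandas as pd

rows = [{"id": i, "text": f"example {i}"} for i in range(10)]
batch_size = 4

con = sqlite3.connect(":memory:")
for offset in range(0, len(rows), batch_size):
    df = pd.DataFrame(rows[offset : offset + batch_size])
    # First batch creates the table; later batches append, mirroring how the
    # writer above switches `if_exists` to "append" once offset > 0.
    df.to_sql("demo", con, index=False,
              if_exists="append" if offset > 0 else "replace")

print(con.execute("SELECT COUNT(*) FROM demo").fetchone())  # (10,)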
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__) @dataclass class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) lowerCamelCase__ =field( default=_lowercase, metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) lowerCamelCase__ =field( default=_lowercase, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) lowerCamelCase__ =field( default=_lowercase, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, ) lowerCamelCase__ =field( default=_lowercase, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, ) lowerCamelCase__ =field( default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, ) lowerCamelCase__ =field( default=_lowercase, metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) }, ) @dataclass class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =field(default=_lowercase, metadata={'help': 'The input training data file (a text file).'} ) lowerCamelCase__ =field( default=_lowercase, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, ) lowerCamelCase__ =field( default=_lowercase, metadata={'help': 'Overwrite the cached training and evaluation sets'} ) lowerCamelCase__ =field( default=_lowercase, metadata={'help': 'The number of processes to use for the preprocessing.'}, ) lowerCamelCase__ =field( default=_lowercase, metadata={ 'help': ( 'The maximum total input sequence length after tokenization. If passed, sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) }, ) lowerCamelCase__ =field( default=_lowercase, metadata={ 'help': ( 'Whether to pad all samples to the maximum sentence length. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch. More ' 'efficient on GPU but very bad for TPU.' ) }, ) lowerCamelCase__ =field( default=_lowercase, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) }, ) lowerCamelCase__ =field( default=_lowercase, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' 
) }, ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if self.train_file is not None: __snake_case : Optional[int] = self.train_file.split('''.''' )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: __snake_case : str = self.validation_file.split('''.''' )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =True lowerCamelCase__ =None lowerCamelCase__ =None def __call__(self , a_ ): '''simple docstring''' __snake_case : str = '''label''' if '''label''' in features[0].keys() else '''labels''' __snake_case : List[Any] = [feature.pop(__UpperCamelCase ) for feature in features] __snake_case : Dict = len(__UpperCamelCase ) __snake_case : Optional[int] = len(features[0]['''input_ids'''] ) __snake_case : List[str] = [ [{k: v[i] for k, v in feature.items()} for i in range(__UpperCamelCase )] for feature in features ] __snake_case : Optional[int] = list(chain(*__UpperCamelCase ) ) __snake_case : Union[str, Any] = self.tokenizer.pad( __UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , ) # Un-flatten __snake_case : List[str] = {k: v.view(__UpperCamelCase , __UpperCamelCase , -1 ) for k, v in batch.items()} # Add back labels __snake_case : List[Any] = torch.tensor(__UpperCamelCase , dtype=torch.intaa ) return batch def lowercase ( ) ->Tuple: """simple docstring""" __snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case , __snake_case , __snake_case : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case , __snake_case , __snake_case : Optional[Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_swag''' , a__ , a__ ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __snake_case : int = training_args.get_process_log_level() logger.setLevel(a__ ) datasets.utils.logging.set_verbosity(a__ ) transformers.utils.logging.set_verbosity(a__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. 
__snake_case : str = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __snake_case : Dict = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: __snake_case : Union[str, Any] = {} if data_args.train_file is not None: __snake_case : Tuple = data_args.train_file if data_args.validation_file is not None: __snake_case : List[str] = data_args.validation_file __snake_case : int = data_args.train_file.split('''.''' )[-1] __snake_case : Dict = load_dataset( a__ , data_files=a__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. __snake_case : Tuple = load_dataset( '''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __snake_case : List[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __snake_case : Union[str, Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __snake_case : List[Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=a__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. 
__snake_case : str = [f"""ending{i}""" for i in range(4 )] __snake_case : Any = '''sent1''' __snake_case : Optional[int] = '''sent2''' if data_args.max_seq_length is None: __snake_case : Optional[int] = tokenizer.model_max_length if max_seq_length > 1_024: logger.warning( '''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value''' ''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can''' ''' override this default with `--block_size xxx`.''' ) __snake_case : str = 1_024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) __snake_case : Any = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. def preprocess_function(_snake_case : Any ): __snake_case : List[str] = [[context] * 4 for context in examples[context_name]] __snake_case : Any = examples[question_header_name] __snake_case : Optional[int] = [ [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(a__ ) ] # Flatten out __snake_case : Optional[Any] = list(chain(*a__ ) ) __snake_case : Any = list(chain(*a__ ) ) # Tokenize __snake_case : int = tokenizer( a__ , a__ , truncation=a__ , max_length=a__ , padding='''max_length''' if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(a__ ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError('''--do_train requires a train dataset''' ) __snake_case : List[str] = raw_datasets['''train'''] if data_args.max_train_samples is not None: __snake_case : Tuple = min(len(a__ ) , data_args.max_train_samples ) __snake_case : Optional[int] = train_dataset.select(range(a__ ) ) with training_args.main_process_first(desc='''train dataset map pre-processing''' ): __snake_case : List[Any] = train_dataset.map( a__ , batched=a__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError('''--do_eval requires a validation dataset''' ) __snake_case : Union[str, Any] = raw_datasets['''validation'''] if data_args.max_eval_samples is not None: __snake_case : Union[str, Any] = min(len(a__ ) , data_args.max_eval_samples ) __snake_case : Union[str, Any] = eval_dataset.select(range(a__ ) ) with training_args.main_process_first(desc='''validation dataset map pre-processing''' ): __snake_case : Optional[Any] = eval_dataset.map( a__ , batched=a__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator __snake_case : Tuple = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=a__ , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(_snake_case : List[str] ): __snake_case , __snake_case : Tuple = eval_predictions __snake_case : List[Any] = np.argmax(a__ , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer __snake_case : Optional[Any] = Trainer( model=a__ , args=a__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , 
tokenizer=a__ , data_collator=a__ , compute_metrics=a__ , ) # Training if training_args.do_train: __snake_case : Dict = None if training_args.resume_from_checkpoint is not None: __snake_case : int = training_args.resume_from_checkpoint elif last_checkpoint is not None: __snake_case : int = last_checkpoint __snake_case : Optional[int] = trainer.train(resume_from_checkpoint=a__ ) trainer.save_model() # Saves the tokenizer too for easy upload __snake_case : int = train_result.metrics __snake_case : List[str] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(a__ ) ) __snake_case : Tuple = min(a__ , len(a__ ) ) trainer.log_metrics('''train''' , a__ ) trainer.save_metrics('''train''' , a__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __snake_case : Dict = trainer.evaluate() __snake_case : Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a__ ) __snake_case : List[str] = min(a__ , len(a__ ) ) trainer.log_metrics('''eval''' , a__ ) trainer.save_metrics('''eval''' , a__ ) __snake_case : Optional[int] = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''multiple-choice''', '''dataset_tags''': '''swag''', '''dataset_args''': '''regular''', '''dataset''': '''SWAG''', '''language''': '''en''', } if training_args.push_to_hub: trainer.push_to_hub(**a__ ) else: trainer.create_model_card(**a__ ) def lowercase ( _snake_case : Tuple ) ->Tuple: """simple docstring""" main() if __name__ == "__main__": main()
359
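The preprocess_function in the training script above flattens each example's four (context, ending) pairs into separate rows before tokenizing, then regroups the tokenizer output in chunks of four. A dependency-free sketch of that flatten/un-flatten step follows; the field names mirror SWAG, and fake_input_ids stands in for real tokenizer output.

from itertools import chain

# Two SWAG-style examples, each with one context and four candidate endings.
examples = {
    "sent1": ["A man is sitting", "A dog runs"],
    "endings": [["on a bench.", "on a roof.", "in a car.", "at a desk."],
                ["in a park.", "on a wall.", "in a lake.", "on a bed."]],
}

# Flatten: repeat each context 4x so every (context, ending) pair is one row.
first = list(chain(*[[ctx] * 4 for ctx in examples["sent1"]]))
second = list(chain(*examples["endings"]))
assert len(first) == len(second) == 8

# After tokenizing the 8 flat pairs, regroup them into chunks of 4 per example,
# mirroring the `{k: [v[i:i+4] ...]}` un-flatten step in the script above.
fake_input_ids = list(range(8))  # stand-in for tokenizer output
grouped = [fake_input_ids[i : i + 4] for i in range(0, len(fake_input_ids), 4)]
assert grouped == [[0, 1, 2, 3], [4, 5, 6, 7]]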
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[int] = { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""", } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='lxmert' lowerCamelCase__ ={} def __init__(self , a_=3_05_22 , a_=7_68 , a_=12 , a_=95_00 , a_=16_00 , a_=4_00 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=9 , a_=5 , a_=5 , a_=20_48 , a_=4 , a_=6.67 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = vocab_size __snake_case : List[str] = hidden_size __snake_case : List[Any] = num_attention_heads __snake_case : int = hidden_act __snake_case : int = intermediate_size __snake_case : Any = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : Tuple = max_position_embeddings __snake_case : List[str] = type_vocab_size __snake_case : str = initializer_range __snake_case : Tuple = layer_norm_eps __snake_case : List[Any] = num_qa_labels __snake_case : int = num_object_labels __snake_case : Optional[Any] = num_attr_labels __snake_case : Union[str, Any] = l_layers __snake_case : Optional[int] = x_layers __snake_case : Optional[int] = r_layers __snake_case : Tuple = visual_feat_dim __snake_case : Optional[int] = visual_pos_dim __snake_case : Dict = visual_loss_normalizer __snake_case : str = task_matched __snake_case : Optional[Any] = task_mask_lm __snake_case : List[str] = task_obj_predict __snake_case : Optional[Any] = task_qa __snake_case : Any = visual_obj_loss __snake_case : int = visual_attr_loss __snake_case : List[Any] = visual_feat_loss __snake_case : Optional[Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers} super().__init__(**a_ )
24
0
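A short usage sketch for the config above, assuming transformers is installed and that the final dictionary assignment corresponds to num_hidden_layers, as in the released LXMERT config.

from transformers import LxmertConfig  # assumes transformers is installed

# Defaults mirror the __init__ above; override the layer counts for a tiny model.
config = LxmertConfig(vocab_size=1000, l_layers=2, x_layers=1, r_layers=2)
print(config.num_hidden_layers)  # {'vision': 2, 'cross_encoder': 1, 'language': 2}
print(config.to_dict()["vocab_size"])  # 1000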
"""simple docstring""" import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _UpperCAmelCase ( __lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =MgpstrTokenizer lowerCamelCase__ =False lowerCamelCase__ ={} lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' super().setUp() # fmt: off __snake_case : Optional[int] = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on __snake_case : int = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) __snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCAmelCase__ ) + '''\n''' ) def SCREAMING_SNAKE_CASE (self , **a_ ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Optional[int] = '''tester''' __snake_case : Any = '''tester''' return input_text, output_text @unittest.skip('''MGP-STR always lower cases letters.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = self.get_tokenizers(do_lower_case=UpperCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __snake_case : Optional[Any] = '''[SPECIAL_TOKEN]''' tokenizer.add_special_tokens({'''cls_token''': special_token} ) __snake_case : Optional[int] = tokenizer.encode([special_token] , add_special_tokens=UpperCAmelCase__ ) self.assertEqual(len(UpperCAmelCase__ ) , 1 ) __snake_case : int = tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) self.assertTrue(special_token not in decoded ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __snake_case , __snake_case : List[str] = self.get_input_output_texts(UpperCAmelCase__ ) __snake_case : int = tokenizer.tokenize(UpperCAmelCase__ ) __snake_case : int = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) __snake_case : List[Any] = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) __snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ ) self.assertNotEqual(len(UpperCAmelCase__ ) , 0 ) __snake_case : Tuple = tokenizer.decode(UpperCAmelCase__ ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(text_a.replace(''' ''' , '''''' ) , UpperCAmelCase__ ) @unittest.skip('''MGP-STR tokenizer only handles one sequence.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass @unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass
360
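The setUp above writes a character-level vocab.json and reloads it through the tokenizer. Here is a minimal, download-free sketch of the same round trip using plain dictionaries; the vocabulary mirrors the one in the test.

import json
import os
import tempfile

# Character-level vocabulary like the one built in setUp() above.
vocab = {tok: i for i, tok in enumerate(
    ["[GO]", "[s]"] + list("0123456789abcdefghijklmnopqrstuvwxyz"))}

with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "vocab.json")
    with open(path, "w", encoding="utf-8") as fp:
        json.dump(vocab, fp)
    with open(path, encoding="utf-8") as fp:
        loaded = json.load(fp)

# Encode/decode "tester" purely through the vocabulary tables.
ids = [loaded[char] for char in "tester"]
inverse = {i: tok for tok, i in loaded.items()}
assert "".join(inverse[i] for i in ids) == "tester"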
"""simple docstring""" def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" __snake_case : Tuple = len(_snake_case ) __snake_case : str = sum(_snake_case ) __snake_case : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __snake_case : Optional[Any] = True for i in range(1 , s + 1 ): __snake_case : int = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __snake_case : Union[str, Any] = dp[i][j - 1] if arr[i - 1] <= j: __snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __snake_case : List[str] = s - 2 * j break return diff
24
0
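The routine above is the classic minimum subset-sum-difference DP: dp[i][j] records whether some subset of the first i items sums to j, and the answer is total - 2*j for the largest reachable j <= total/2. A runnable restatement follows, using a 1-D table, which is a standard space optimization of the 2-D version above.

def min_subset_sum_diff(arr):
    """Smallest |S1 - S2| over all 2-way partitions of `arr` (non-negative ints)."""
    total = sum(arr)
    reachable = [False] * (total // 2 + 1)  # reachable[j]: some subset sums to j
    reachable[0] = True
    for value in arr:
        # Iterate downward so each value is used at most once (0/1 knapsack).
        for j in range(total // 2, value - 1, -1):
            reachable[j] = reachable[j] or reachable[j - value]
    best = max(j for j in range(total // 2 + 1) if reachable[j])
    return total - 2 * best

assert min_subset_sum_diff([1, 6, 11, 5]) == 1   # {1, 5, 6} vs {11}
assert min_subset_sum_diff([3, 1, 4, 2, 2]) == 0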
"""simple docstring""" from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Dict[Optional[str], Type[Formatter]] = {} SCREAMING_SNAKE_CASE : Dict[Optional[str], str] = {} SCREAMING_SNAKE_CASE : Dict[Optional[str], Exception] = {} def lowercase ( _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[int] = None , ) ->List[str]: """simple docstring""" __snake_case : Dict = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f"""Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})""" ) __snake_case : int = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f"""Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})""" ) __snake_case : Tuple = format_type def lowercase ( _snake_case : int , _snake_case : Optional[int] , _snake_case : Optional[int] = None ) ->List[Any]: """simple docstring""" __snake_case : Optional[int] = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): __snake_case : Any = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=["""python"""]) _register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""]) _register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""]) _register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""]) _register_formatter(CustomFormatter, """custom""") if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""]) else: SCREAMING_SNAKE_CASE : int = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""") _register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""]) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""]) else: SCREAMING_SNAKE_CASE : Tuple = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""") _register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""]) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, """jax""", aliases=[]) else: SCREAMING_SNAKE_CASE : Dict = ValueError("""JAX needs to be installed to be able to return JAX arrays.""") _register_unavailable_formatter(_jax_error, """jax""", aliases=[]) def lowercase ( _snake_case : List[Any] ) ->Optional[str]: """simple docstring""" if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def lowercase ( _snake_case : int , **_snake_case : Union[str, Any] ) ->Formatter: """simple docstring""" __snake_case : List[Any] = get_format_type_from_alias(__lowerCAmelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**__lowerCAmelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f"""Return type should 
be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got \'{format_type}\'""" )
361
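Below is a stripped-down sketch of the registry pattern above: a canonical-name table, an alias table, and a table of saved exceptions for formats whose optional dependency is missing. The names here (_REGISTRY, get_formatter, the toy dict factory) are illustrative, not the datasets API.

from typing import Callable, Dict, List, Optional

_REGISTRY: Dict[str, Callable] = {}
_ALIASES: Dict[str, str] = {}
_UNAVAILABLE: Dict[str, Exception] = {}

def register(fmt: str, factory: Callable, aliases: Optional[List[str]] = None):
    _REGISTRY[fmt] = factory
    for alias in set((aliases or []) + [fmt]):
        _ALIASES[alias] = fmt

def get_formatter(name: str, **kwargs):
    fmt = _ALIASES.get(name, name)          # resolve alias -> canonical name
    if fmt in _REGISTRY:
        return _REGISTRY[fmt](**kwargs)
    if fmt in _UNAVAILABLE:                 # optional dep missing: raise saved error
        raise _UNAVAILABLE[fmt]
    raise ValueError(f"unknown format {name!r}; choose from {sorted(_REGISTRY)}")

register("numpy", dict, aliases=["np"])     # toy factory for demonstration
print(type(get_formatter("np")))            # <class 'dict'>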
"""simple docstring""" from collections.abc import Callable def lowercase ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) ->float: """simple docstring""" __snake_case : float = a __snake_case : float = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('''could not find root in given interval.''' ) else: __snake_case : float = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: __snake_case : List[str] = mid else: __snake_case : str = mid __snake_case : str = start + (end - start) / 2.0 return mid def lowercase ( _snake_case : float ) ->float: """simple docstring""" return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1000)) import doctest doctest.testmod()
24
0
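A runnable restatement of the bisection routine above with descriptive names, so the __main__ call resolves; the tolerance 1e-7 matches the loop condition in the original.

def bisect_root(f, a, b, tol=1e-7):
    """Root of f in [a, b], assuming f(a) and f(b) have opposite signs."""
    if f(a) == 0:
        return a
    if f(b) == 0:
        return b
    if f(a) * f(b) > 0:
        raise ValueError("could not find root in given interval.")
    while (b - a) / 2.0 > tol:
        mid = a + (b - a) / 2.0
        if f(mid) == 0:
            return mid
        if f(a) * f(mid) < 0:
            b = mid      # root lies in the left half
        else:
            a = mid      # root lies in the right half
    return a + (b - a) / 2.0

# x**3 - 2x - 5 has a single real root near 2.0945514815423265.
root = bisect_root(lambda x: x**3 - 2 * x - 5, 1, 1000)
assert abs(root**3 - 2 * root - 5) < 1e-5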
"""simple docstring""" import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) SCREAMING_SNAKE_CASE : Tuple = logging.getLogger() def lowercase ( ) ->Union[str, Any]: """simple docstring""" __snake_case : Optional[Any] = argparse.ArgumentParser() parser.add_argument('''-f''' ) __snake_case : Optional[Any] = parser.parse_args() return args.f class _UpperCAmelCase ( _A ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = logging.StreamHandler(sys.stdout ) logger.addHandler(__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Optional[int] = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , '''run_glue_deebert.py''' ) with patch.object(__SCREAMING_SNAKE_CASE , '''argv''' , __SCREAMING_SNAKE_CASE ): __snake_case : str = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(__SCREAMING_SNAKE_CASE , 0.666 ) @slow @require_torch_non_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = '''\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '''.split() self.run_and_check(__SCREAMING_SNAKE_CASE ) __snake_case : List[str] = '''\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '''.split() self.run_and_check(__SCREAMING_SNAKE_CASE ) __snake_case : Optional[int] = '''\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '''.split() self.run_and_check(__SCREAMING_SNAKE_CASE )
362
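The test harness above drives the training script by patching sys.argv rather than spawning a subprocess. A minimal sketch of that trick follows; main here is a toy stand-in for run_glue_deebert.main.

import sys
from unittest.mock import patch

def main():
    # Toy entry point that reads its own CLI flags, like run_glue_deebert.main().
    return {"argv_seen": sys.argv[1:]}

args = ["prog.py", "--model_type", "roberta", "--do_eval"]
with patch.object(sys, "argv", args):
    result = main()
assert result["argv_seen"] == ["--model_type", "roberta", "--do_eval"]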
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[str] = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : str = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
24
0
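Here is a minimal stand-in for the lazy-import pattern above using PEP 562 module __getattr__; this is a sketch of the idea, not the actual transformers._LazyModule implementation. Placed in a package __init__.py, nothing under the package is imported until an exported name is first touched.

# lazy_pkg/__init__.py : sketch of lazy imports via PEP 562 __getattr__
import importlib

_import_structure = {
    "configuration_luke": ["LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}
_attr_to_module = {attr: module for module, attrs in _import_structure.items()
                   for attr in attrs}

def __getattr__(name):
    # Triggered only when `name` is not found normally, so submodules load
    # on first access: `from lazy_pkg import LukeConfig` imports one file.
    if name in _attr_to_module:
        submodule = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")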
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowercase ( _snake_case : Tuple , _snake_case : Any=0.999 , _snake_case : Optional[int]="cosine" , ) ->List[Any]: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(_snake_case : int ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_snake_case : Tuple ): return math.exp(t * -12.0 ) else: raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) __snake_case : List[str] = [] for i in range(__snake_case ): __snake_case : Tuple = i / num_diffusion_timesteps __snake_case : Dict = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__snake_case ) / alpha_bar_fn(__snake_case ) , __snake_case ) ) return torch.tensor(__snake_case , dtype=torch.floataa ) class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowerCamelCase__ =[e.name for e in KarrasDiffusionSchedulers] lowerCamelCase__ =2 @register_to_config def __init__(self , a_ = 10_00 , a_ = 0.0_0085 , a_ = 0.012 , a_ = "linear" , a_ = None , a_ = "epsilon" , a_ = "linspace" , a_ = 0 , ): '''simple docstring''' if trained_betas is not None: __snake_case : Any = torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa ) elif beta_schedule == "linear": __snake_case : Optional[Any] = torch.linspace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __snake_case : Optional[Any] = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _SCREAMING_SNAKE_CASE , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __snake_case : Optional[Any] = betas_for_alpha_bar(_SCREAMING_SNAKE_CASE ) else: raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" ) __snake_case : Union[str, Any] = 1.0 - self.betas __snake_case : Dict = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE (self , a_ , a_=None ): '''simple docstring''' if schedule_timesteps is None: __snake_case : List[Any] = self.timesteps __snake_case : Any = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: __snake_case : int = 1 if len(_SCREAMING_SNAKE_CASE ) > 1 else 0 else: __snake_case : str = timestep.cpu().item() if torch.is_tensor(_SCREAMING_SNAKE_CASE ) else timestep __snake_case : Optional[Any] = self._index_counter[timestep_int] return indices[pos].item() @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def SCREAMING_SNAKE_CASE (self , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[int] = self.index_for_timestep(_SCREAMING_SNAKE_CASE ) if self.state_in_first_order: __snake_case : Union[str, Any] = self.sigmas[step_index] else: __snake_case : str = self.sigmas_interpol[step_index] __snake_case : List[str] = sample / ((sigma**2 + 1) ** 0.5) return sample def SCREAMING_SNAKE_CASE (self , a_ , a_ = None , a_ = None , ): '''simple docstring''' __snake_case : int = num_inference_steps __snake_case : Dict = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": __snake_case : Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , _SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )[::-1].copy() elif self.config.timestep_spacing == "leading": __snake_case : Optional[int] = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __snake_case : Optional[int] = (np.arange(0 , _SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1].copy().astype(_SCREAMING_SNAKE_CASE ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": __snake_case : List[Any] = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __snake_case : Tuple = (np.arange(_SCREAMING_SNAKE_CASE , 0 , -step_ratio )).round().copy().astype(_SCREAMING_SNAKE_CASE ) timesteps -= 1 else: raise ValueError( f"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) __snake_case : Any = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) __snake_case : Union[str, Any] = torch.from_numpy(np.log(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE ) __snake_case : Optional[int] = np.interp(_SCREAMING_SNAKE_CASE , np.arange(0 , len(_SCREAMING_SNAKE_CASE ) ) , _SCREAMING_SNAKE_CASE ) __snake_case : Optional[Any] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) __snake_case : List[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE ) # interpolate sigmas __snake_case : Optional[Any] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp() __snake_case : str = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) __snake_case : List[Any] = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(_SCREAMING_SNAKE_CASE ).startswith('''mps''' ): # mps does not support float64 __snake_case : Any = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE , dtype=torch.floataa ) else: __snake_case : Optional[int] = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) # interpolate timesteps __snake_case : List[str] = self.sigma_to_t(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE , dtype=timesteps.dtype ) __snake_case : Tuple = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten() __snake_case : Optional[Any] = torch.cat([timesteps[:1], interleaved_timesteps] ) __snake_case : List[str] = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter __snake_case : List[str] = defaultdict(_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : str = sigma.log() # get distribution __snake_case : Optional[int] = log_sigma - self.log_sigmas[:, None] # get sigmas range __snake_case : List[Any] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) __snake_case : List[str] = low_idx + 1 __snake_case : str = self.log_sigmas[low_idx] __snake_case : str = self.log_sigmas[high_idx] # interpolate sigmas __snake_case : Dict = (low - log_sigma) / (low - high) __snake_case : Union[str, Any] = w.clamp(0 , 1 ) # transform interpolation to time range __snake_case : List[Any] = (1 - w) * low_idx + w * high_idx __snake_case : Union[str, Any] = t.view(sigma.shape ) return t @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.sample is None def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ = True , ): '''simple docstring''' __snake_case : Tuple = self.index_for_timestep(_SCREAMING_SNAKE_CASE ) # advance index counter by 1 __snake_case : Dict = timestep.cpu().item() if torch.is_tensor(_SCREAMING_SNAKE_CASE ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: __snake_case : List[str] = self.sigmas[step_index] __snake_case : Optional[int] = self.sigmas_interpol[step_index + 1] __snake_case : Any = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method __snake_case : List[Any] = self.sigmas[step_index - 1] __snake_case : Optional[int] = self.sigmas_interpol[step_index] __snake_case : Optional[int] = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. 
# We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API __snake_case : Union[str, Any] = 0 __snake_case : List[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": __snake_case : List[str] = sigma_hat if self.state_in_first_order else sigma_interpol __snake_case : Tuple = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": __snake_case : Optional[int] = sigma_hat if self.state_in_first_order else sigma_interpol __snake_case : Optional[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('''prediction_type not implemented yet: sample''' ) else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order __snake_case : int = (sample - pred_original_sample) / sigma_hat # 3. delta timestep __snake_case : str = sigma_interpol - sigma_hat # store for 2nd order step __snake_case : Dict = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order __snake_case : Any = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep __snake_case : List[str] = sigma_next - sigma_hat __snake_case : str = self.sample __snake_case : Union[str, Any] = None __snake_case : Union[str, Any] = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : str = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(_SCREAMING_SNAKE_CASE ): # mps does not support float64 __snake_case : List[str] = self.timesteps.to(original_samples.device , dtype=torch.floataa ) __snake_case : Tuple = timesteps.to(original_samples.device , dtype=torch.floataa ) else: __snake_case : Tuple = self.timesteps.to(original_samples.device ) __snake_case : Optional[int] = timesteps.to(original_samples.device ) __snake_case : List[Any] = [self.index_for_timestep(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for t in timesteps] __snake_case : Union[str, Any] = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): __snake_case : Union[str, Any] = sigma.unsqueeze(-1 ) __snake_case : Optional[Any] = original_samples + noise * sigma return noisy_samples def __len__(self ): '''simple docstring''' return self.config.num_train_timesteps
363
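The betas_for_alpha_bar helper at the top of the scheduler above derives per-step betas from a cosine noise schedule: beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta. A dependency-free restatement returning plain floats:

import math

def betas_for_alpha_bar(num_steps, max_beta=0.999):
    """Cosine schedule: beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped."""
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = []
    for i in range(num_steps):
        t0, t1 = i / num_steps, (i + 1) / num_steps
        betas.append(min(1 - alpha_bar(t1) / alpha_bar(t0), max_beta))
    return betas

betas = betas_for_alpha_bar(1000)
assert 0 < betas[0] < betas[-1] <= 0.999  # noise increases over the schedule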
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =['image_processor', 'tokenizer'] lowerCamelCase__ ='CLIPImageProcessor' lowerCamelCase__ =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__(self , a_=None , a_=None , **a_ ): '''simple docstring''' __snake_case : Any = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a_ , ) __snake_case : Union[str, Any] = kwargs.pop('''feature_extractor''' ) __snake_case : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a_ , a_ ) def __call__(self , a_=None , a_=None , a_=None , **a_ ): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __snake_case : Dict = self.tokenizer(a_ , return_tensors=a_ , **a_ ) if images is not None: __snake_case : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None and images is not None: __snake_case : List[str] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.batch_decode(*a_ , **a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.decode(*a_ , **a_ ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.tokenizer.model_input_names __snake_case : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
24
0
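The processor above is a thin dispatcher: tokenizer for text, image processor for images, merged when both are present. A toy sketch of that contract, with plain callables standing in for the real CLIP components:

class PairProcessor:
    """Minimal sketch of the processor pattern above: wrap a tokenizer and an
    image processor and dispatch in __call__ based on which inputs are given."""

    def __init__(self, image_processor, tokenizer):
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        self.image_processor = image_processor
        self.tokenizer = tokenizer

    def __call__(self, text=None, images=None):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        encoding = self.tokenizer(text) if text is not None else {}
        if images is not None:
            encoding["pixel_values"] = self.image_processor(images)
        return encoding

proc = PairProcessor(
    image_processor=lambda images: [[0.0]],          # toy "pixel values"
    tokenizer=lambda text: {"input_ids": [[1, 2]]},  # toy encoding
)
print(proc(text="a cat", images=object()))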
"""simple docstring""" import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json SCREAMING_SNAKE_CASE : Any = """sshleifer/mar_enro_6_3_student""" class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' super().setUp() __snake_case : List[Any] = cached_path( '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=lowerCamelCase_ , ) __snake_case : Any = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k""" @slow @require_torch_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' MarianMTModel.from_pretrained(lowerCamelCase_ ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = { """$MAX_LEN""": 64, """$BS""": 64, """$GAS""": 1, """$ENRO_DIR""": self.data_dir, """facebook/mbart-large-cc25""": MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", """--learning_rate=3e-5""": """--learning_rate 3e-4""", """--num_train_epochs 6""": """--num_train_epochs 1""", } # Clean up bash script __snake_case : Optional[int] = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split('''finetune.py''' )[1].strip() __snake_case : List[str] = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''\"$@\"''' , '''''' ) for k, v in env_vars_to_replace.items(): __snake_case : Union[str, Any] = bash_script.replace(lowerCamelCase_ , str(lowerCamelCase_ ) ) __snake_case : Any = self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") __snake_case : Any = f""" --output_dir {output_dir} --tokenizer_name Helsinki-NLP/opus-mt-en-ro --sortish_sampler --do_predict --gpus 1 --freeze_encoder --n_train 40000 --n_val 500 --n_test 500 --fp16_opt_level O1 --num_sanity_val_steps 0 --eval_beams 2 """.split() # XXX: args.gpus > 1 : handle multi_gpu in the future __snake_case : Tuple = ["""finetune.py"""] + bash_script.split() + args with patch.object(lowerCamelCase_ , '''argv''' , lowerCamelCase_ ): __snake_case : List[Any] = argparse.ArgumentParser() __snake_case : int = pl.Trainer.add_argparse_args(lowerCamelCase_ ) __snake_case : Dict = SummarizationModule.add_model_specific_args(lowerCamelCase_ , os.getcwd() ) __snake_case : int = parser.parse_args() __snake_case : Optional[int] = main(lowerCamelCase_ ) # Check metrics __snake_case : Optional[Any] = load_json(model.metrics_save_path ) __snake_case : Any = metrics["""val"""][0] __snake_case : Any = metrics["""val"""][-1] self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , lowerCamelCase_ ) self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 ) # 2. 
BLEU finishes above 17 self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 ) # 3. test BLEU and val BLEU within ~1.1 pt. self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict __snake_case : Any = os.listdir(lowerCamelCase_ ) __snake_case : List[str] = [x for x in contents if x.endswith('''.ckpt''' )][0] __snake_case : str = os.path.join(args.output_dir , lowerCamelCase_ ) __snake_case : List[Any] = torch.load(lowerCamelCase_ , map_location='''cpu''' ) __snake_case : List[str] = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight""" assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: __snake_case : Any = {os.path.basename(lowerCamelCase_ ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1 class _UpperCAmelCase ( __snake_case ): '''simple docstring''' @timeout_decorator.timeout(6_00 ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = f"""{self.test_file_dir_str}/test_data/wmt_en_ro""" __snake_case : Dict = { """--fp16_opt_level=O1""": """""", """$MAX_LEN""": 1_28, """$BS""": 16, """$GAS""": 1, """$ENRO_DIR""": data_dir, """$m""": """sshleifer/student_marian_en_ro_6_1""", """val_check_interval=0.25""": """val_check_interval=1.0""", } # Clean up bash script __snake_case : str = ( (self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split('''distillation.py''' )[1].strip() ) __snake_case : Dict = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''\"$@\"''' , '''''' ) __snake_case : Tuple = bash_script.replace('''--fp16 ''' , ''' ''' ) for k, v in env_vars_to_replace.items(): __snake_case : Dict = bash_script.replace(lowerCamelCase_ , str(lowerCamelCase_ ) ) __snake_case : Optional[Any] = self.get_auto_remove_tmp_dir() __snake_case : str = bash_script.replace('''--fp16''' , '''''' ) __snake_case : List[Any] = 6 __snake_case : List[str] = ( ["""distillation.py"""] + bash_script.split() + [ f"""--output_dir={output_dir}""", """--gpus=1""", """--learning_rate=1e-3""", f"""--num_train_epochs={epochs}""", """--warmup_steps=10""", """--val_check_interval=1.0""", """--do_predict""", ] ) with patch.object(lowerCamelCase_ , '''argv''' , lowerCamelCase_ ): __snake_case : Any = argparse.ArgumentParser() __snake_case : str = pl.Trainer.add_argparse_args(lowerCamelCase_ ) __snake_case : Optional[Any] = SummarizationDistiller.add_model_specific_args(lowerCamelCase_ , os.getcwd() ) __snake_case : Tuple = parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu __snake_case : int = distill_main(lowerCamelCase_ ) # Check metrics __snake_case : Dict = load_json(model.metrics_save_path ) __snake_case : Any = metrics["""val"""][0] __snake_case : Union[str, Any] = metrics["""val"""][-1] assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.01 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. 
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , lowerCamelCase_ ) # check lightning ckpt can be loaded and has a reasonable statedict __snake_case : Optional[Any] = os.listdir(lowerCamelCase_ ) __snake_case : Union[str, Any] = [x for x in contents if x.endswith('''.ckpt''' )][0] __snake_case : Optional[Any] = os.path.join(args.output_dir , lowerCamelCase_ ) __snake_case : Optional[Any] = torch.load(lowerCamelCase_ , map_location='''cpu''' ) __snake_case : Optional[Any] = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight""" assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: __snake_case : Union[str, Any] = {os.path.basename(lowerCamelCase_ ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1
364
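Both tests above prepare their CLI by templating a bash script: placeholders like $BS are substituted, then the remainder is split into an argv list. A small sketch of that step; the script text and values are illustrative.

bash_script = "finetune.py --data_dir $ENRO_DIR --train_batch_size=$BS --max_len $MAX_LEN"
replacements = {"$ENRO_DIR": "/tmp/wmt_en_ro", "$BS": 64, "$MAX_LEN": 64}
for key, value in replacements.items():
    bash_script = bash_script.replace(key, str(value))

# Rebuild argv exactly as the tests do before patching sys.argv.
argv = ["finetune.py"] + bash_script.split()[1:]
print(argv)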
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE : List[Any] = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""", """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""", }, } SCREAMING_SNAKE_CASE : Tuple = { """facebook/mbart-large-en-ro""": 1024, """facebook/mbart-large-cc25""": 1024, } # fmt: off SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =VOCAB_FILES_NAMES lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ =['input_ids', 'attention_mask'] lowerCamelCase__ =MBartTokenizer lowerCamelCase__ =[] lowerCamelCase__ =[] def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token super().__init__( vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , ) __snake_case : Tuple = vocab_file __snake_case : Optional[Any] = False if not self.vocab_file else True __snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) __snake_case : Optional[int] = { lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX''' __snake_case : Any = self.convert_tokens_to_ids(self._src_lang ) __snake_case : Dict = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' __snake_case : Tuple = [self.sep_token_id] __snake_case : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __snake_case : Optional[int] = src_lang __snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ ) __snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ ) __snake_case : int = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ): '''simple docstring''' __snake_case : int = src_lang __snake_case : List[Any] = tgt_lang return super().prepare_seqaseq_batch(a_ , a_ , **a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : List[Any] = [] __snake_case : Any = [self.eos_token_id, self.cur_lang_code] __snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : Optional[Any] = [] __snake_case : Dict = [self.eos_token_id, self.cur_lang_code] __snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + 
suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(a_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return __snake_case : Optional[Any] = os.path.join( a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file , a_ ) return (out_vocab_file,)
24
0
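The tokenizer file above ends every sequence with [</s>, language_code] via its suffix tokens. A minimal usage sketch, assuming the public facebook/mbart-large-en-ro checkpoint and the de-obfuscated MBartTokenizerFast class from transformers (not the wrapper names used in this row):

from transformers import MBartTokenizerFast

# Load the fast mBART tokenizer with explicit source/target languages.
tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tokenizer("UN Chief Says There Is No Military Solution in Syria")
# set_src_lang_special_tokens places [</s>, src_lang_code] at the end,
# so the encoded sequence should finish with the English language code.
print(tokenizer.convert_ids_to_tokens(batch["input_ids"])[-2:])  # ['</s>', 'en_XX']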
"""simple docstring""" def lowercase ( _snake_case : List[str] , _snake_case : int , _snake_case : Union[str, Any] ) ->int: """simple docstring""" if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ): raise ValueError('''The length of profit and weight must be same.''' ) if max_weight <= 0: raise ValueError('''max_weight must greater than zero.''' ) if any(p < 0 for p in profit ): raise ValueError('''Profit can not be negative.''' ) if any(w < 0 for w in weight ): raise ValueError('''Weight can not be negative.''' ) # List created to store profit gained for the 1kg in case of each weight # respectively. Calculate and append profit/weight for each element. __snake_case : Union[str, Any] = [p / w for p, w in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] # Creating a copy of the list and sorting profit/weight in ascending order __snake_case : int = sorted(SCREAMING_SNAKE_CASE_ ) # declaring useful variables __snake_case : Optional[int] = len(SCREAMING_SNAKE_CASE_ ) __snake_case : Optional[Any] = 0 __snake_case : Optional[Any] = 0 __snake_case : List[str] = 0 # loop till the total weight do not reach max limit e.g. 15 kg and till i<length while limit <= max_weight and i < length: # flag value for encountered greatest element in sorted_profit_by_weight __snake_case : Optional[int] = sorted_profit_by_weight[length - i - 1] __snake_case : int = profit_by_weight.index(SCREAMING_SNAKE_CASE_ ) __snake_case : int = -1 # check if the weight encountered is less than the total weight # encountered before. if max_weight - limit >= weight[index]: limit += weight[index] # Adding profit gained for the given weight 1 === # weight[index]/weight[index] gain += 1 * profit[index] else: # Since the weight encountered is greater than limit, therefore take the # required number of remaining kgs and calculate profit for it. # weight remaining / weight[index] gain += (max_weight - limit) / weight[index] * profit[index] break i += 1 return gain if __name__ == "__main__": print( """Input profits, weights, and then max_weight (all positive ints) separated by """ """spaces.""" ) SCREAMING_SNAKE_CASE : List[str] = [int(x) for x in input("""Input profits separated by spaces: """).split()] SCREAMING_SNAKE_CASE : Dict = [int(x) for x in input("""Input weights separated by spaces: """).split()] SCREAMING_SNAKE_CASE : str = int(input("""Max weight allowed: """)) # Function Call calc_profit(profit, weight, max_weight)
365
"""simple docstring""" import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__) @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None if is_torch_available(): import torch from torch.utils.data import Dataset class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ): '''simple docstring''' __snake_case : Any = hans_processors[task]() __snake_case : int = os.path.join( a_ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , ) __snake_case : Tuple = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Dict = label_list[2], label_list[1] __snake_case : Any = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __snake_case : int = cached_features_file + '''.lock''' with FileLock(a_ ): if os.path.exists(a_ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __snake_case : Union[str, Any] = torch.load(a_ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __snake_case : Dict = ( processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) ) logger.info('''Training examples: %s''' , len(a_ ) ) __snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) logger.info('''Saving features into cached file %s''' , a_ ) torch.save(self.features , a_ ) def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list if is_tf_available(): import tensorflow as tf class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = 1_28 , a_=False , a_ = False , ): '''simple docstring''' __snake_case : List[Any] = hans_processors[task]() __snake_case : str = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Tuple = label_list[2], label_list[1] __snake_case : Dict = label_list __snake_case : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) __snake_case : Dict = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_00_00 == 0: 
logger.info('''Writing example %d of %d''' % (ex_index, len(a_ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __snake_case : Union[str, Any] = tf.data.Dataset.from_generator( a_ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.dataset def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return ["contradiction", "entailment", "neutral"] def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = [] for i, line in enumerate(a_ ): if i == 0: continue __snake_case : Tuple = '''%s-%s''' % (set_type, line[0]) __snake_case : Dict = line[5] __snake_case : int = line[6] __snake_case : Dict = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __snake_case : List[Any] = line[0] examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) ) return examples def lowercase ( _snake_case : List[InputExample] , _snake_case : List[str] , _snake_case : int , _snake_case : PreTrainedTokenizer , ) ->List[str]: """simple docstring""" __snake_case : Optional[int] = {label: i for i, label in enumerate(_snake_case )} __snake_case : Tuple = [] for ex_index, example in tqdm.tqdm(enumerate(_snake_case ) , desc='''convert examples to features''' ): if ex_index % 10_000 == 0: logger.info('''Writing example %d''' % (ex_index) ) __snake_case : List[Any] = tokenizer( example.text_a , example.text_b , add_special_tokens=_snake_case , max_length=_snake_case , padding='''max_length''' , truncation=_snake_case , return_overflowing_tokens=_snake_case , ) __snake_case : List[Any] = label_map[example.label] if example.label in label_map else 0 __snake_case : Union[str, Any] = int(example.pairID ) features.append(InputFeatures(**_snake_case , label=_snake_case , pairID=_snake_case ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features SCREAMING_SNAKE_CASE : Dict = { """hans""": 3, } SCREAMING_SNAKE_CASE : str = { """hans""": HansProcessor, }
24
0
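A short worked check of the greedy fractional-knapsack routine above, reusing the calc_profit name the file itself calls at the bottom; the numbers are illustrative, not from the source:

# Ratios are 10/3 ≈ 3.33, 9/4 = 2.25 and 8/5 = 1.6. Greedy takes all of
# item 0 (3 kg, +10), then 2 of item 1's 4 kg for 9 * 2/4 = 4.5 more.
profit = [10, 9, 8]
weight = [3, 4, 5]
assert calc_profit(profit, weight, max_weight=5) == 14.5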
"""simple docstring""" from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( 'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ', lowerCamelCase_, ) class _UpperCAmelCase ( lowerCamelCase_ ): '''simple docstring''' lowerCamelCase__ =RobertaConfig lowerCamelCase__ ="""roberta""" def __init__(self , a_ ): '''simple docstring''' super().__init__(_UpperCAmelCase ) __snake_case : int = RobertaEmbeddings(_UpperCAmelCase ) self.init_weights() @add_start_docstrings( 'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ', lowerCamelCase_, ) class _UpperCAmelCase ( lowerCamelCase_ ): '''simple docstring''' lowerCamelCase__ =RobertaConfig lowerCamelCase__ ="""roberta""" def __init__(self , a_ ): '''simple docstring''' super().__init__(_UpperCAmelCase ) __snake_case : Any = config.num_labels __snake_case : Any = config.num_hidden_layers __snake_case : Dict = DeeRobertaModel(_UpperCAmelCase ) __snake_case : List[str] = nn.Dropout(config.hidden_dropout_prob ) __snake_case : List[str] = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE (self , a_=None , a_=None , a_=None , a_=None , a_=None , a_=None , a_=None , a_=-1 , a_=False , ): '''simple docstring''' __snake_case : Dict = self.num_layers try: __snake_case : Tuple = self.roberta( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , ) __snake_case : str = outputs[1] __snake_case : int = self.dropout(_UpperCAmelCase ) __snake_case : Union[str, Any] = self.classifier(_UpperCAmelCase ) __snake_case : Optional[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: __snake_case : List[str] = e.message __snake_case : Optional[int] = e.exit_layer __snake_case : Any = outputs[0] if not self.training: __snake_case : List[str] = entropy(_UpperCAmelCase ) __snake_case : Any = [] __snake_case : Tuple = [] if labels is not None: if self.num_labels == 1: # We are doing regression __snake_case : Dict = MSELoss() __snake_case : Optional[int] = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: __snake_case : Union[str, Any] = CrossEntropyLoss() __snake_case : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits __snake_case : List[str] = [] for highway_exit in outputs[-1]: __snake_case : Any = highway_exit[0] if not self.training: highway_logits_all.append(_UpperCAmelCase ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression __snake_case : Optional[int] = MSELoss() __snake_case : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: __snake_case : str = CrossEntropyLoss() __snake_case : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_UpperCAmelCase ) if train_highway: __snake_case : 
Optional[Any] = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: __snake_case : List[str] = (loss,) + outputs if not self.training: __snake_case : str = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: __snake_case : Any = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
366
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[str] = { """tanreinama/GPTSAN-2.8B-spout_is_uniform""": ( """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json""" ), } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='gptsan-japanese' lowerCamelCase__ =[ 'past_key_values', ] lowerCamelCase__ ={ 'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__(self , a_=3_60_00 , a_=12_80 , a_=10_24 , a_=81_92 , a_=40_96 , a_=1_28 , a_=10 , a_=0 , a_=16 , a_=16 , a_=1_28 , a_=0.0 , a_=1E-5 , a_=False , a_=0.0 , a_="float32" , a_=False , a_=False , a_=False , a_=0.002 , a_=False , a_=True , a_=3_59_98 , a_=3_59_95 , a_=3_59_99 , **a_ , ): '''simple docstring''' __snake_case : Any = vocab_size __snake_case : str = max_position_embeddings __snake_case : Any = d_model __snake_case : List[str] = d_ff __snake_case : Dict = d_ext __snake_case : Optional[Any] = d_spout __snake_case : int = num_switch_layers __snake_case : List[Any] = num_ext_layers __snake_case : Any = num_switch_layers + num_ext_layers __snake_case : Optional[int] = num_heads __snake_case : Tuple = num_experts __snake_case : List[Any] = expert_capacity __snake_case : Dict = dropout_rate __snake_case : Optional[Any] = layer_norm_epsilon __snake_case : Dict = router_bias __snake_case : str = router_jitter_noise __snake_case : List[str] = router_dtype __snake_case : Union[str, Any] = router_ignore_padding_tokens __snake_case : List[str] = output_hidden_states __snake_case : Optional[Any] = output_attentions __snake_case : Any = initializer_factor __snake_case : int = output_router_logits __snake_case : Union[str, Any] = use_cache super().__init__( separator_token_id=a_ , pad_token_id=a_ , eos_token_id=a_ , **a_ , )
24
0
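The second file in this row is mostly a container for constructor arguments. A brief sketch of how such a config is used, assuming the de-obfuscated GPTSanJapaneseConfig class that ships in transformers (the printed defaults mirror the values in the row above):

from transformers import GPTSanJapaneseConfig

config = GPTSanJapaneseConfig()  # defaults: d_model=1024, 10 switch layers
print(config.d_model, config.num_switch_layers, config.num_experts)
# The attribute_map above aliases hidden_size to d_model, so generic code
# that reads config.hidden_size keeps working.
assert config.hidden_size == config.d_model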
"""simple docstring""" from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class _UpperCAmelCase ( _a ): '''simple docstring''' def __init__(self , a_ , a_ = None , a_ = None , a_ = False , a_ = False , a_ = None , a_ = None , **a_ , ): '''simple docstring''' super().__init__( features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , ) __snake_case : Optional[int] = Generator( cache_dir=__lowerCamelCase , features=__lowerCamelCase , generator=__lowerCamelCase , gen_kwargs=__lowerCamelCase , **__lowerCamelCase , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if self.streaming: __snake_case : int = self.builder.as_streaming_dataset(split='''train''' ) # Build regular (map-style) dataset else: __snake_case : int = None __snake_case : List[str] = None __snake_case : List[str] = None __snake_case : str = None self.builder.download_and_prepare( download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , ) __snake_case : Optional[int] = self.builder.as_dataset( split='''train''' , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory ) return dataset
367
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """adapter_layer""": """encoder.layers.*.adapter_layer""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", """pooling_layer.linear""": """projector""", """pooling_layer.projection""": """classifier""", } SCREAMING_SNAKE_CASE : int = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """projector""", """classifier""", ] def lowercase ( _snake_case : Optional[int] ) ->int: """simple docstring""" __snake_case : int = {} with open(_snake_case , '''r''' ) as file: for line_number, line in enumerate(_snake_case ): __snake_case : Union[str, Any] = line.strip() if line: __snake_case : str = line.split() __snake_case : Union[str, Any] = line_number __snake_case : Dict = words[0] __snake_case : str = value return result def lowercase ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Any , _snake_case : List[str] ) ->List[str]: """simple docstring""" for attribute in key.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : str = '''param''' if weight_type is not None and weight_type != "param": __snake_case : Union[str, Any] = getattr(_snake_case , _snake_case ).shape elif weight_type is not None and weight_type == "param": __snake_case : Optional[Any] = hf_pointer for attribute in hf_param_name.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : List[str] = shape_pointer.shape # let's reduce dimension __snake_case : int = value[0] else: __snake_case : int = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __snake_case : List[Any] = value elif weight_type == "weight_g": __snake_case : Tuple = value elif weight_type == "weight_v": __snake_case : str = value elif weight_type == "bias": __snake_case : str = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __snake_case : List[Any] = getattr(_snake_case , _snake_case ) __snake_case : int = value else: __snake_case : List[Any] = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : int ) ->int: """simple docstring""" __snake_case : Optional[Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : Dict = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : List[str] = '''param''' if weight_type is not None and weight_type != "param": __snake_case : str = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __snake_case : Tuple = '''.'''.join([key, hf_param_name] ) else: __snake_case : Optional[int] = key __snake_case : List[Any] = value if '''lm_head''' in full_key else value[0] SCREAMING_SNAKE_CASE : Tuple = { """W_a""": """linear_1.weight""", """W_b""": """linear_2.weight""", """b_a""": """linear_1.bias""", """b_b""": """linear_2.bias""", """ln_W""": """norm.weight""", """ln_b""": """norm.bias""", } def lowercase ( _snake_case : str , _snake_case : List[Any] , _snake_case : Tuple=None , _snake_case : int=None ) ->Dict: """simple docstring""" __snake_case : Tuple = False for key, mapped_key in MAPPING.items(): __snake_case : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __snake_case : int = True if "*" in mapped_key: __snake_case : List[Any] = name.split(_snake_case )[0].split('''.''' )[-2] __snake_case : Tuple = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: __snake_case : Union[str, Any] = '''weight_g''' elif "weight_v" in name: __snake_case : List[str] = '''weight_v''' elif "bias" in name: __snake_case : Any = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __snake_case : List[Any] = '''weight''' else: __snake_case : Union[str, Any] = None if hf_dict is not None: rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) else: set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) return is_used return is_used def lowercase ( _snake_case : str , _snake_case : Dict , _snake_case : List[str] ) ->Any: """simple docstring""" __snake_case : Union[str, Any] = [] __snake_case : Union[str, Any] = fairseq_model.state_dict() __snake_case : str = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __snake_case : str = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) __snake_case : Union[str, Any] = True else: __snake_case : Optional[Any] = load_wavaveca_layer(_snake_case , _snake_case , _snake_case ) if not is_used: unused_weights.append(_snake_case ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowercase ( _snake_case : Any , 
_snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : List[str] ) ->Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = full_name.split('''conv_layers.''' )[-1] __snake_case : str = name.split('''.''' ) __snake_case : Optional[int] = int(items[0] ) __snake_case : Any = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __snake_case : int = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __snake_case : List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_snake_case ) @torch.no_grad() def lowercase ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any=None , _snake_case : str=None , _snake_case : List[Any]=True , _snake_case : int=False ) ->Dict: """simple docstring""" if config_path is not None: __snake_case : Optional[Any] = WavaVecaConfig.from_pretrained(_snake_case ) else: __snake_case : Tuple = WavaVecaConfig() if is_seq_class: __snake_case : Optional[int] = read_txt_into_dict(_snake_case ) __snake_case : List[Any] = idalabel __snake_case : int = WavaVecaForSequenceClassification(_snake_case ) __snake_case : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) feature_extractor.save_pretrained(_snake_case ) elif is_finetuned: if dict_path: __snake_case : int = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : Tuple = target_dict.pad_index __snake_case : int = target_dict.bos_index __snake_case : Tuple = target_dict.eos_index __snake_case : Optional[Any] = len(target_dict.symbols ) __snake_case : Any = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) __snake_case : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched __snake_case : Dict = 0 __snake_case : 
List[Any] = 1 with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(_snake_case , _snake_case ) __snake_case : List[Any] = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , ) __snake_case : Tuple = True if config.feat_extract_norm == '''layer''' else False __snake_case : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) __snake_case : Tuple = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) __snake_case : Optional[int] = WavaVecaForCTC(_snake_case ) else: __snake_case : Tuple = WavaVecaForPreTraining(_snake_case ) if is_finetuned or is_seq_class: __snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __snake_case : Dict = argparse.Namespace(task='''audio_pretraining''' ) __snake_case : Optional[int] = fairseq.tasks.setup_task(_snake_case ) __snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case ) __snake_case : int = model[0].eval() recursively_load_weights(_snake_case , _snake_case , not is_finetuned ) hf_wavavec.save_pretrained(_snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) parser.add_argument( """--is_seq_class""", action="""store_true""", help="""Whether the model to convert is a fine-tuned sequence classification model or not""", ) SCREAMING_SNAKE_CASE : Any = parser.parse_args() SCREAMING_SNAKE_CASE : Tuple = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
24
0
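The first file in this row backs the generator input stream of the datasets library: a Python callable plus gen_kwargs is handed to the packaged Generator builder, then materialised with download_and_prepare unless streaming. A rough sketch of the public entry point, assuming the datasets package is installed:

from datasets import Dataset

def gen():
    # gen_kwargs passed to the reader end up as arguments of this callable.
    for i in range(3):
        yield {"id": i, "text": f"example {i}"}

ds = Dataset.from_generator(gen)
print(len(ds), ds[0])  # 3 {'id': 0, 'text': 'example 0'}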
"""simple docstring""" from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ = None ): '''simple docstring''' if components is None: __snake_case : int = [] __snake_case : Optional[Any] = list(lowercase_ ) def __len__(self ): '''simple docstring''' return len(self.__components ) def __str__(self ): '''simple docstring''' return "(" + ",".join(map(lowercase_ , self.__components ) ) + ")" def __add__(self , a_ ): '''simple docstring''' __snake_case : Optional[int] = len(self ) if size == len(lowercase_ ): __snake_case : Dict = [self.__components[i] + other.component(lowercase_ ) for i in range(lowercase_ )] return Vector(lowercase_ ) else: raise Exception('''must have the same size''' ) def __sub__(self , a_ ): '''simple docstring''' __snake_case : Any = len(self ) if size == len(lowercase_ ): __snake_case : Any = [self.__components[i] - other.component(lowercase_ ) for i in range(lowercase_ )] return Vector(lowercase_ ) else: # error case raise Exception('''must have the same size''' ) @overload def __mul__(self , a_ ): '''simple docstring''' ... @overload def __mul__(self , a_ ): '''simple docstring''' ... def __mul__(self , a_ ): '''simple docstring''' if isinstance(lowercase_ , (float, int) ): __snake_case : int = [c * other for c in self.__components] return Vector(lowercase_ ) elif isinstance(lowercase_ , lowercase_ ) and len(self ) == len(lowercase_ ): __snake_case : Union[str, Any] = len(self ) __snake_case : List[str] = [self.__components[i] * other.component(lowercase_ ) for i in range(lowercase_ )] return sum(lowercase_ ) else: # error case raise Exception('''invalid operand!''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return Vector(self.__components ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if isinstance(lowercase_ , lowercase_ ) and -len(self.__components ) <= i < len(self.__components ): return self.__components[i] else: raise Exception('''index out of range''' ) def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' assert -len(self.__components ) <= pos < len(self.__components ) __snake_case : str = value def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if len(self.__components ) == 0: raise Exception('''Vector is empty''' ) __snake_case : List[str] = [c**2 for c in self.__components] return math.sqrt(sum(lowercase_ ) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = False ): '''simple docstring''' __snake_case : str = self * other __snake_case : Optional[int] = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den ) ) else: return math.acos(num / den ) def lowercase ( _snake_case : Tuple ) ->Vector: """simple docstring""" assert isinstance(a__ , a__ ) return Vector([0] * dimension ) def lowercase ( _snake_case : int , _snake_case : Dict ) ->Vector: """simple docstring""" assert isinstance(a__ , a__ ) and (isinstance(a__ , a__ )) __snake_case : str = [0] * dimension __snake_case : List[str] = 1 return Vector(a__ ) def lowercase ( _snake_case : Dict , _snake_case : List[Any] , _snake_case : str ) ->Vector: """simple docstring""" assert ( isinstance(a__ , a__ ) and isinstance(a__ , a__ ) and (isinstance(a__ , (int, float) )) ) return x * scalar + y def lowercase ( _snake_case : List[str] , _snake_case : Any , _snake_case : List[Any] ) ->Vector: """simple docstring""" random.seed(a__ ) __snake_case : List[Any] = [random.randint(a__ , a__ ) 
for _ in range(a__ )] return Vector(a__ ) class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_ , a_ ): '''simple docstring''' __snake_case : str = matrix __snake_case : List[Any] = w __snake_case : Dict = h def __str__(self ): '''simple docstring''' __snake_case : Optional[int] = '''''' for i in range(self.__height ): ans += "|" for j in range(self.__width ): if j < self.__width - 1: ans += str(self.__matrix[i][j] ) + "," else: ans += str(self.__matrix[i][j] ) + "|\n" return ans def __add__(self , a_ ): '''simple docstring''' if self.__width == other.width() and self.__height == other.height(): __snake_case : Union[str, Any] = [] for i in range(self.__height ): __snake_case : List[Any] = [ self.__matrix[i][j] + other.component(lowercase_ , lowercase_ ) for j in range(self.__width ) ] matrix.append(lowercase_ ) return Matrix(lowercase_ , self.__width , self.__height ) else: raise Exception('''matrix must have the same dimension!''' ) def __sub__(self , a_ ): '''simple docstring''' if self.__width == other.width() and self.__height == other.height(): __snake_case : Dict = [] for i in range(self.__height ): __snake_case : Any = [ self.__matrix[i][j] - other.component(lowercase_ , lowercase_ ) for j in range(self.__width ) ] matrix.append(lowercase_ ) return Matrix(lowercase_ , self.__width , self.__height ) else: raise Exception('''matrices must have the same dimension!''' ) @overload def __mul__(self , a_ ): '''simple docstring''' ... @overload def __mul__(self , a_ ): '''simple docstring''' ... def __mul__(self , a_ ): '''simple docstring''' if isinstance(lowercase_ , lowercase_ ): # matrix-vector if len(lowercase_ ) == self.__width: __snake_case : Optional[Any] = zero_vector(self.__height ) for i in range(self.__height ): __snake_case : Optional[int] = [ self.__matrix[i][j] * other.component(lowercase_ ) for j in range(self.__width ) ] ans.change_component(lowercase_ , sum(lowercase_ ) ) return ans else: raise Exception( '''vector must have the same size as the ''' '''number of columns of the matrix!''' ) elif isinstance(lowercase_ , (int, float) ): # matrix-scalar __snake_case : str = [ [self.__matrix[i][j] * other for j in range(self.__width )] for i in range(self.__height ) ] return Matrix(lowercase_ , self.__width , self.__height ) return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.__height def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.__width def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('''change_component: indices out of bounds''' ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' if 0 <= x < self.__height and 0 <= y < self.__width: __snake_case : Dict = value else: raise Exception('''change_component: indices out of bounds''' ) def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' if self.__height != self.__width: raise Exception('''Matrix is not square''' ) __snake_case : List[str] = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(lowercase_ ) ): __snake_case : Union[str, Any] = minor[i][:y] + minor[i][y + 1 :] return Matrix(lowercase_ , self.__width - 1 , self.__height - 1 ).determinant() def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' if self.__height != self.__width: raise Exception('''Matrix is not square''' ) if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(lowercase_ , 
lowercase_ ) else: raise Exception('''Indices out of bounds''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if self.__height != self.__width: raise Exception('''Matrix is not square''' ) if self.__height < 1: raise Exception('''Matrix has no element''' ) elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __snake_case : str = [ self.__matrix[0][y] * self.cofactor(0 , lowercase_ ) for y in range(self.__width ) ] return sum(lowercase_ ) def lowercase ( _snake_case : List[Any] ) ->Matrix: """simple docstring""" __snake_case : Any = [[0] * n for _ in range(a__ )] return Matrix(a__ , a__ , a__ ) def lowercase ( _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : int , _snake_case : Union[str, Any] ) ->Matrix: """simple docstring""" random.seed(a__ ) __snake_case : List[Any] = [ [random.randint(a__ , a__ ) for _ in range(a__ )] for _ in range(a__ ) ] return Matrix(a__ , a__ , a__ )
368
"""simple docstring""" from ..utils import DummyObject, requires_backends class _UpperCAmelCase ( metaclass=__snake_case ): '''simple docstring''' lowerCamelCase__ =['transformers', 'torch', 'note_seq'] def __init__(self , *a_ , **a_ ): '''simple docstring''' requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
24
0
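The linear-algebra file above overloads __mul__ for both dot products and scalar multiplication, and computes determinants by cofactor expansion. A small sketch, assuming the obfuscated classes are bound to the hypothetical names Vector and Matrix (as in the TheAlgorithms module this resembles):

v = Vector([1, 2, 3])
w = Vector([2, -1, 4])
print(v * w)    # dot product: 1*2 + 2*(-1) + 3*4 = 12
print(v * 2.0)  # scalar product: (2.0,4.0,6.0)

m = Matrix([[1, 2], [3, 4]], 2, 2)  # (rows, width, height)
print(m.determinant())  # 1*4 - 2*3 = -2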
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , ): '''simple docstring''' __snake_case : Optional[Any] = size if size is not None else {"shortest_edge": 20} __snake_case : Dict = crop_size if crop_size is not None else {"height": 18, "width": 18} __snake_case : str = parent __snake_case : Dict = batch_size __snake_case : Optional[Any] = num_channels __snake_case : int = image_size __snake_case : List[str] = min_resolution __snake_case : Dict = max_resolution __snake_case : str = do_resize __snake_case : Union[str, Any] = size __snake_case : Any = do_center_crop __snake_case : str = crop_size __snake_case : Union[str, Any] = do_flip_channel_order def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class _UpperCAmelCase ( a__, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = MobileViTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , '''do_resize''' ) ) self.assertTrue(hasattr(a_ , '''size''' ) ) self.assertTrue(hasattr(a_ , '''do_center_crop''' ) ) self.assertTrue(hasattr(a_ , '''center_crop''' ) ) self.assertTrue(hasattr(a_ , '''do_flip_channel_order''' ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input __snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], 
self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : List[str] = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input __snake_case : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Optional[int] = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input __snake_case : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
369
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , ): '''simple docstring''' __snake_case : List[Any] = size if size is not None else {'''shortest_edge''': 20} __snake_case : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __snake_case : Tuple = parent __snake_case : Tuple = batch_size __snake_case : Tuple = num_channels __snake_case : List[str] = image_size __snake_case : Optional[Any] = min_resolution __snake_case : List[Any] = max_resolution __snake_case : List[Any] = do_resize __snake_case : Dict = size __snake_case : Dict = do_center_crop __snake_case : Dict = crop_size __snake_case : str = do_flip_channel_order def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class _UpperCAmelCase ( __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = MobileViTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , '''do_resize''' ) ) self.assertTrue(hasattr(a_ , '''size''' ) ) self.assertTrue(hasattr(a_ , '''do_center_crop''' ) ) self.assertTrue(hasattr(a_ , '''center_crop''' ) ) self.assertTrue(hasattr(a_ , '''do_flip_channel_order''' ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], 
self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : str = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input __snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Union[str, Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input __snake_case : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
24
0
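Both files in this row exercise the same resize -> center-crop -> channel-flip pipeline. A minimal sketch of what the shape assertions check, assuming transformers, torch and Pillow are installed (the sizes match the test defaults above):

import numpy as np
from PIL import Image
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor(
    size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18}
)
image = Image.fromarray(np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
# Shortest edge resized to 20, then center-cropped to 18x18.
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])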
"""simple docstring""" from collections import defaultdict from math import gcd def lowercase ( _snake_case : Union[str, Any] = 1_500_000 ) ->Optional[Any]: """simple docstring""" __snake_case : defaultdict = defaultdict(_snake_case ) __snake_case : str = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , _snake_case , 2 ): if gcd(_snake_case , _snake_case ) > 1: continue __snake_case : Optional[Any] = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(_snake_case , limit + 1 , _snake_case ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(F'{solution() = }')
370
"""simple docstring""" import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def lowercase ( ) ->Optional[int]: """simple docstring""" __snake_case : int = torch.nn.Linear(2 , 4 ) __snake_case : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 ) __snake_case : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 ) __snake_case : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) __snake_case : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def lowercase ( _snake_case : str ) ->Optional[Any]: """simple docstring""" return (model.weight.abs().sum() + model.bias.abs().sum()).item() def lowercase ( _snake_case : Union[str, Any] ) ->Tuple: """simple docstring""" __snake_case : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(_snake_case ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(a_ ): __snake_case : Any = Accelerator(cpu=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case : Optional[int] = GradientState() assert state.num_steps == 1 __snake_case : str = 4 assert state.num_steps == 4 assert state.sync_gradients is True __snake_case : List[Any] = False assert state.sync_gradients is False GradientState._reset_state() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*a_ , **a_ ): pass with patch('''torch.cuda.set_device''' , a_ ), 
patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ): __snake_case : List[Any] = Accelerator() self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : Any = get_signature(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # make sure loaded weights match accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : List[Any] = get_signature(a_ ) # saving hook def save_config(a_ , a_ , a_ ): __snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__} with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f: json.dump(a_ , a_ ) # loading hook def load_config(a_ , a_ ): with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f: __snake_case : Any = json.load(a_ ) __snake_case : List[str] = config['''class_name'''] __snake_case : str = accelerator.register_save_state_pre_hook(a_ ) __snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Any = '''random''' # make sure loaded weights match with hooks accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks removed load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Union[str, Any] = '''random''' # make sure loaded weights match with hooks removed accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components() __snake_case : Union[str, Any] = None # This should work __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertTrue(dummy_obj is None ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() __snake_case : Optional[int] = [1, 2, 3] # This should work __snake_case , __snake_case , 
__snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map={'''''': 0} , ) __snake_case : Optional[Any] = Accelerator() # This should work __snake_case : Any = accelerator.prepare(a_ ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Any = Accelerator() with init_empty_weights(): __snake_case : List[str] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : Union[str, Any] = infer_auto_device_map(a_ ) __snake_case : str = '''cpu''' __snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ ) # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Dict = accelerator.prepare(a_ ) @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU} with init_empty_weights(): __snake_case : Any = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : List[Any] = infer_auto_device_map(a_ ) __snake_case : Dict = 1 __snake_case : str = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Any = Accelerator() # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Tuple = accelerator.prepare(a_ ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM with init_empty_weights(): __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) __snake_case : Tuple = infer_auto_device_map(a_ ) __snake_case : Tuple = 1 __snake_case : List[Any] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Tuple = Accelerator() # This should work __snake_case : Dict = accelerator.prepare(a_ ) @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = torch.nn.Linear(10 , 10 ) __snake_case 
: List[str] = torch.optim.SGD(model.parameters() , lr=0.01 ) __snake_case : Optional[Any] = Accelerator(cpu=a_ ) __snake_case : str = accelerator.prepare(a_ )
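# Hedged usage sketch of the pattern the tests above exercise (an illustrative
# addition; `create_components` is the helper defined at the top of this file):
# Accelerator.prepare wraps the model, optimizer, scheduler, and dataloaders
# for the current device setup and returns them in the order they were passed.
from accelerate import Accelerator

accelerator = Accelerator()
model, optimizer, scheduler, train_dl, valid_dl = create_components()
model, optimizer, scheduler, train_dl, valid_dl = accelerator.prepare(
    model, optimizer, scheduler, train_dl, valid_dl
)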
24
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE : Union[str, Any] = { "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"], "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : List[str] = ["BertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Dict = [ "BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "BertForMaskedLM", "BertForMultipleChoice", "BertForNextSentencePrediction", "BertForPreTraining", "BertForQuestionAnswering", "BertForSequenceClassification", "BertForTokenClassification", "BertLayer", "BertLMHeadModel", "BertModel", "BertPreTrainedModel", "load_tf_weights_in_bert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : int = [ "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFBertEmbeddings", "TFBertForMaskedLM", "TFBertForMultipleChoice", "TFBertForNextSentencePrediction", "TFBertForPreTraining", "TFBertForQuestionAnswering", "TFBertForSequenceClassification", "TFBertForTokenClassification", "TFBertLMHeadModel", "TFBertMainLayer", "TFBertModel", "TFBertPreTrainedModel", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : List[Any] = ["TFBertTokenizer"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : int = [ "FlaxBertForCausalLM", "FlaxBertForMaskedLM", "FlaxBertForMultipleChoice", "FlaxBertForNextSentencePrediction", "FlaxBertForPreTraining", "FlaxBertForQuestionAnswering", "FlaxBertForSequenceClassification", "FlaxBertForTokenClassification", "FlaxBertModel", "FlaxBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if 
not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
371
"""simple docstring""" def lowercase ( _snake_case : int ) ->str: """simple docstring""" if number > 0: raise ValueError('''input must be a negative integer''' ) __snake_case : Any = len(bin(_snake_case )[3:] ) __snake_case : List[Any] = bin(abs(_snake_case ) - (1 << binary_number_length) )[3:] __snake_case : Dict = ( ( '''1''' + '''0''' * (binary_number_length - len(_snake_case )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
24
0
"""simple docstring""" from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
350
"""simple docstring""" def lowercase ( ) ->int: """simple docstring""" return [ a * b * (1_000 - a - b) for a in range(1 , 999 ) for b in range(_snake_case , 999 ) if (a * a + b * b == (1_000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'{solution() = }')
24
0
"""simple docstring""" def lowercase ( _snake_case : int = 1 , _snake_case : int = 1_000 ) ->Optional[int]: """simple docstring""" __snake_case : List[str] = 1 __snake_case : Any = 0 for divide_by_number in range(UpperCamelCase__ , digit + 1 ): __snake_case : list[int] = [] __snake_case : int = numerator for _ in range(1 , digit + 1 ): if now_divide in has_been_divided: if longest_list_length < len(UpperCamelCase__ ): __snake_case : Optional[Any] = len(UpperCamelCase__ ) __snake_case : int = divide_by_number else: has_been_divided.append(UpperCamelCase__ ) __snake_case : Tuple = now_divide * 10 % divide_by_number return the_digit # Tests if __name__ == "__main__": import doctest doctest.testmod()
351
"""simple docstring""" def lowercase ( _snake_case : int = 100 ) ->int: """simple docstring""" __snake_case : str = n * (n + 1) * (2 * n + 1) / 6 __snake_case : Dict = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F'{solution() = }')
24
0
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def lowercase ( ) ->List[str]: """simple docstring""" __snake_case : Union[str, Any] = {} __snake_case : Optional[Any] = 2 while True: __snake_case : Tuple = factor_map.pop(UpperCAmelCase_ , UpperCAmelCase_ ) if factor: __snake_case : Optional[int] = factor + prime while x in factor_map: x += factor __snake_case : Tuple = factor else: __snake_case : List[Any] = prime yield prime prime += 1 def lowercase ( _snake_case : float = 1e1_0 ) ->int: """simple docstring""" __snake_case : Tuple = sieve() __snake_case : Union[str, Any] = 1 while True: __snake_case : Optional[Any] = next(UpperCAmelCase_ ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(UpperCAmelCase_ ) n += 2 if __name__ == "__main__": print(solution())
352
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__) @dataclass class _UpperCAmelCase ( datasets.BuilderConfig ): '''simple docstring''' lowerCamelCase__ =10000 lowerCamelCase__ =None lowerCamelCase__ =None class _UpperCAmelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' lowerCamelCase__ =ParquetConfig def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) __snake_case : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(a_ , (str, list, tuple) ): __snake_case : Union[str, Any] = data_files if isinstance(a_ , a_ ): __snake_case : Union[str, Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : List[Any] = [dl_manager.iter_files(a_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __snake_case : int = [] for split_name, files in data_files.items(): if isinstance(a_ , a_ ): __snake_case : List[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : int = [dl_manager.iter_files(a_ ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(a_ ): with open(a_ , '''rb''' ) as f: __snake_case : Any = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) ) break splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) ) return splits def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __snake_case : List[Any] = table_cast(a_ , self.info.features.arrow_schema ) return pa_table def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ): with open(a_ , '''rb''' ) as f: __snake_case : int = pq.ParquetFile(a_ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __snake_case : Dict = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"""{file_idx}_{batch_idx}""", self._cast_table(a_ ) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(a_ )}: {e}""" ) raise
24
0
"""simple docstring""" from __future__ import annotations from random import random class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ = None ): '''simple docstring''' __snake_case : Tuple = value __snake_case : Any = random() __snake_case : Node | None = None __snake_case : Node | None = None def __repr__(self ): '''simple docstring''' from pprint import pformat if self.left is None and self.right is None: return f"""'{self.value}: {self.prior:.5}'""" else: return pformat( {f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 ) def __str__(self ): '''simple docstring''' __snake_case : Tuple = str(self.value ) + " " __snake_case : Optional[int] = str(self.left or '''''' ) __snake_case : Union[str, Any] = str(self.right or '''''' ) return value + left + right def lowercase ( _snake_case : Node | None , _snake_case : int ) ->tuple[Node | None, Node | None]: """simple docstring""" if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: __snake_case : List[Any] = split(root.left , _lowerCAmelCase ) return left, root else: __snake_case : Union[str, Any] = split(root.right , _lowerCAmelCase ) return root, right def lowercase ( _snake_case : Node | None , _snake_case : Node | None ) ->Node | None: """simple docstring""" if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: __snake_case : Optional[Any] = merge(left.right , _lowerCAmelCase ) return left else: __snake_case : int = merge(_lowerCAmelCase , right.left ) return right def lowercase ( _snake_case : Node | None , _snake_case : int ) ->Node | None: """simple docstring""" __snake_case : Union[str, Any] = Node(_lowerCAmelCase ) __snake_case : str = split(_lowerCAmelCase , _lowerCAmelCase ) return merge(merge(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ) def lowercase ( _snake_case : Node | None , _snake_case : int ) ->Node | None: """simple docstring""" __snake_case : Tuple = split(_lowerCAmelCase , value - 1 ) __snake_case : Optional[int] = split(_lowerCAmelCase , _lowerCAmelCase ) return merge(_lowerCAmelCase , _lowerCAmelCase ) def lowercase ( _snake_case : Node | None ) ->None: """simple docstring""" if not root: # None return else: inorder(root.left ) print(root.value , end=''',''' ) inorder(root.right ) def lowercase ( _snake_case : Node | None , _snake_case : str ) ->Node | None: """simple docstring""" for arg in args.split(): if arg[0] == "+": __snake_case : str = insert(_lowerCAmelCase , int(arg[1:] ) ) elif arg[0] == "-": __snake_case : Tuple = erase(_lowerCAmelCase , int(arg[1:] ) ) else: print('''Unknown command''' ) return root def lowercase ( ) ->None: """simple docstring""" __snake_case : str = None print( '''enter numbers to create a tree, + value to add value into treap, ''' '''- value to erase all nodes with value. \'q\' to quit. ''' ) __snake_case : Any = input() while args != "q": __snake_case : Tuple = interact_treap(_lowerCAmelCase , _lowerCAmelCase ) print(_lowerCAmelCase ) __snake_case : Optional[Any] = input() print('''good by!''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
353
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): __snake_case : Dict = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = '''sshleifer/tiny-gpt2''' __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = '''sgugger/tiny-distilbert-classification''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , ) __snake_case : Optional[Any] = TensorFlowBenchmark(a_ ) __snake_case : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Any = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Union[str, Any] = AutoConfig.from_pretrained(a_ ) __snake_case : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = '''sshleifer/tiny-gpt2''' __snake_case : Optional[Any] = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Dict = TensorFlowBenchmark(a_ , [config] ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = 
TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : int = TensorFlowBenchmark(a_ ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Dict = AutoConfig.from_pretrained(a_ ) __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''patrickvonplaten/t5-tiny-random''' __snake_case : Tuple = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , configs=[config] ) __snake_case : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(a_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(a_ , '''env.csv''' ) , multi_process=a_ , ) __snake_case : Union[str, Any] = TensorFlowBenchmark(a_ ) benchmark.run() self.assertTrue(Path(os.path.join(a_ , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''env.csv''' ) ).exists() ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(a_ ): self.assertTrue(hasattr(a_ , '''sequential''' ) ) self.assertTrue(hasattr(a_ , '''cumulative''' ) ) self.assertTrue(hasattr(a_ , '''current''' ) ) self.assertTrue(hasattr(a_ , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , '''log.txt''' ) , log_print=a_ , 
trace_memory_line_by_line=a_ , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ ) __snake_case : Optional[int] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(a_ , '''log.txt''' ) ).exists() )
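# Hedged usage sketch of the API under test (an illustrative addition):
# benchmark a model's inference time and memory for the given batch sizes and
# sequence lengths, mirroring the arguments the tests above construct.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(benchmark_args).run()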
24
0
"""simple docstring""" def lowercase ( _snake_case : int = 200 ) ->int: """simple docstring""" __snake_case : Union[str, Any] = [1, 2, 5, 10, 20, 50, 100, 200] __snake_case : List[str] = [0] * (pence + 1) __snake_case : List[str] = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(_snake_case , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 7_3682
354
"""simple docstring""" import logging import os import threading import time try: import warnings except ImportError: SCREAMING_SNAKE_CASE : Tuple = None try: import msvcrt except ImportError: SCREAMING_SNAKE_CASE : List[str] = None try: import fcntl except ImportError: SCREAMING_SNAKE_CASE : Tuple = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: SCREAMING_SNAKE_CASE : List[str] = OSError # Data # ------------------------------------------------ SCREAMING_SNAKE_CASE : List[Any] = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] SCREAMING_SNAKE_CASE : List[Any] = """3.0.12""" SCREAMING_SNAKE_CASE : int = None def lowercase ( ) ->str: """simple docstring""" global _logger __snake_case : Union[str, Any] = _logger or logging.getLogger(__name__ ) return _logger class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[int] = lock_file return None def __str__(self ): '''simple docstring''' __snake_case : Tuple = f"""The file lock '{self.lock_file}' could not be acquired.""" return temp class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[Any] = lock return None def __enter__(self ): '''simple docstring''' return self.lock def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.lock.release() return None class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : List[Any] = max_filename_length if max_filename_length is not None else 2_55 # Hash the filename if it's too long __snake_case : Dict = self.hash_filename_if_too_long(a_ , a_ ) # The path to the lock file. __snake_case : str = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __snake_case : Dict = None # The default timeout value. __snake_case : List[Any] = timeout # We use this lock primarily for the lock counter. __snake_case : Tuple = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __snake_case : Optional[Any] = 0 return None @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._timeout @timeout.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Dict = float(a_ ) return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file_fd is not None def SCREAMING_SNAKE_CASE (self , a_=None , a_=0.05 ): '''simple docstring''' if timeout is None: __snake_case : List[str] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __snake_case : Optional[int] = id(self ) __snake_case : str = self._lock_file __snake_case : Optional[int] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(a_ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __snake_case : Optional[int] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def SCREAMING_SNAKE_CASE (self , a_=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __snake_case : Tuple = id(self ) __snake_case : str = self._lock_file logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __snake_case : Dict = 0 logger().debug(f"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__(self ): '''simple docstring''' self.acquire() return self def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.release() return None def __del__(self ): '''simple docstring''' self.release(force=a_ ) return None def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : Any = os.path.basename(a_ ) if len(a_ ) > max_length and max_length > 0: __snake_case : List[Any] = os.path.dirname(a_ ) __snake_case : Any = str(hash(a_ ) ) __snake_case : List[Any] = filename[: max_length - len(a_ ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(a_ , a_ ) else: return path class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) __snake_case : List[str] = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __snake_case : Any = os.open(self._lock_file , a_ ) except OSError: pass else: try: msvcrt.locking(a_ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(a_ ) else: __snake_case : Dict = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Dict = None msvcrt.locking(a_ , msvcrt.LK_UNLCK , 1 ) os.close(a_ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : Optional[Any] = os.statvfs(os.path.dirname(a_ ) ).f_namemax super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC __snake_case : List[str] = os.open(self._lock_file , a_ ) try: fcntl.flock(a_ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(a_ ) else: __snake_case : Optional[int] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Tuple = None fcntl.flock(a_ , fcntl.LOCK_UN ) os.close(a_ ) return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __snake_case : Tuple = os.open(self._lock_file , a_ ) except OSError: pass else: __snake_case : List[Any] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' os.close(self._lock_file_fd ) __snake_case : int = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None SCREAMING_SNAKE_CASE : Dict = None if msvcrt: SCREAMING_SNAKE_CASE : List[Any] = WindowsFileLock elif fcntl: SCREAMING_SNAKE_CASE : List[str] = UnixFileLock else: SCREAMING_SNAKE_CASE : str = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
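# Hedged usage sketch (an illustrative addition; "resource.txt.lock" is a
# hypothetical path): `FileLock` resolves to the platform-appropriate class
# defined above and is used as a context manager, so the lock is released even
# if the guarded block raises.
lock = FileLock("resource.txt.lock", timeout=5)
with lock:
    pass  # exclusive access to the guarded resource here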
24
0
import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    """simple docstring"""
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    """simple docstring"""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
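# Hedged CLI sketch (an illustrative addition): this module backs the
# `accelerate test` subcommand, e.g.
#   accelerate test
#   accelerate test --config_file path/to/config.yaml
# either of which launches test_script.py through `accelerate-launch`.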
355
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=24 , a_=2 , a_=6 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=None , a_=10_00 , ): '''simple docstring''' __snake_case : Any = parent __snake_case : int = batch_size __snake_case : Dict = seq_length __snake_case : List[str] = is_training __snake_case : List[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : Union[str, Any] = use_labels __snake_case : str = vocab_size __snake_case : int = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : int = num_attention_heads __snake_case : str = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : int = hidden_dropout_prob __snake_case : Union[str, Any] = attention_probs_dropout_prob __snake_case : List[Any] = max_position_embeddings __snake_case : Any = type_vocab_size __snake_case : Dict = type_sequence_label_size __snake_case : Optional[Any] = initializer_range __snake_case : Union[str, Any] = num_labels __snake_case : Any = scope __snake_case : Any = range_bbox def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : List[str] = bbox[i, j, 3] __snake_case : Any = bbox[i, j, 1] __snake_case : Tuple = t if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : List[str] = bbox[i, j, 2] __snake_case : Union[str, Any] = bbox[i, j, 0] __snake_case : Dict = t __snake_case : Optional[int] = None if self.use_input_mask: __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __snake_case : Dict = None if self.use_token_type_ids: __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : List[str] = None __snake_case : Union[str, Any] = None if self.use_labels: __snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : Any = model(a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ ) __snake_case : str = model(a_ , bbox=a_ , token_type_ids=a_ ) __snake_case : List[str] = model(a_ , bbox=a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[int] = self.num_labels __snake_case : List[str] = LiltForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Tuple = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[Any] = LiltForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Dict = config_and_inputs __snake_case : Any = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ =( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =False lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' return True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModelTester(self ) __snake_case : Optional[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Dict = type 
self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = LiltModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @require_torch @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a_ ) __snake_case : Dict = torch.tensor([[1, 2]] , device=a_ ) __snake_case : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a_ ) # forward pass with torch.no_grad(): __snake_case : Union[str, Any] = model(input_ids=a_ , bbox=a_ ) __snake_case : Union[str, Any] = torch.Size([1, 2, 7_68] ) __snake_case : str = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=a_ , ) self.assertTrue(outputs.last_hidden_state.shape , a_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a_ , atol=1E-3 ) )
24
0
"""simple docstring""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _UpperCAmelCase ( A__ ): '''simple docstring''' lowerCamelCase__ =['image_processor', 'tokenizer'] lowerCamelCase__ ='BlipImageProcessor' lowerCamelCase__ ='AutoTokenizer' def __init__(self , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = False super().__init__(lowerCamelCase__ , lowerCamelCase__ ) __snake_case : List[Any] = self.image_processor def __call__(self , a_ = None , a_ = None , a_ = True , a_ = False , a_ = None , a_ = None , a_ = 0 , a_ = None , a_ = None , a_ = False , a_ = False , a_ = False , a_ = False , a_ = False , a_ = True , a_ = None , **a_ , ): '''simple docstring''' if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None: __snake_case : Tuple = self.tokenizer __snake_case : Dict = self.tokenizer( text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , ) return text_encoding # add pixel_values __snake_case : int = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ ) if text is not None: __snake_case : Union[str, Any] = self.tokenizer( text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , ) else: __snake_case : Dict = None if text_encoding is not None: encoding_image_processor.update(lowerCamelCase__ ) return encoding_image_processor def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.tokenizer.model_input_names __snake_case : Optional[int] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
356
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ): '''simple docstring''' __snake_case : List[Any] = parent __snake_case : List[Any] = batch_size __snake_case : str = seq_length __snake_case : Any = is_training __snake_case : Any = use_input_mask __snake_case : str = use_token_type_ids __snake_case : Dict = use_labels __snake_case : int = vocab_size __snake_case : Union[str, Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : str = num_attention_heads __snake_case : Optional[int] = intermediate_size __snake_case : str = hidden_act __snake_case : Union[str, Any] = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : str = max_position_embeddings __snake_case : Dict = type_vocab_size __snake_case : List[Any] = type_sequence_label_size __snake_case : Union[str, Any] = initializer_range __snake_case : str = num_labels __snake_case : Dict = num_choices __snake_case : Optional[int] = scope def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Dict = None if self.use_input_mask: __snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Tuple = None __snake_case : List[str] = None __snake_case : Dict = None if self.use_labels: __snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[str] = DistilBertModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model(a_ , a_ ) __snake_case : List[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple 
docstring''' __snake_case : Optional[Any] = DistilBertForMaskedLM(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Tuple = DistilBertForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : Optional[Any] = model( a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Any = self.num_labels __snake_case : Optional[int] = DistilBertForSequenceClassification(a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = self.num_labels __snake_case : Optional[int] = DistilBertForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Dict = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = self.num_choices __snake_case : Any = DistilBertForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : Optional[int] = model( a_ , attention_mask=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs __snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCamelCase__ =( { 'feature-extraction': DistilBertModel, 'fill-mask': DistilBertForMaskedLM, 'question-answering': DistilBertForQuestionAnswering, 'text-classification': DistilBertForSequenceClassification, 'token-classification': DistilBertForTokenClassification, 'zero-shot': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = DistilBertModelTester(self ) __snake_case : List[str] = ConfigTester(self , config_class=a_ , dim=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def 
SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Tuple = DistilBertModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return __snake_case : List[str] = True __snake_case : Tuple = model_class(config=a_ ) __snake_case : Any = self._prepare_for_class(a_ , a_ ) __snake_case : Dict = torch.jit.trace( a_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a_ , os.path.join(a_ , '''traced_model.pt''' ) ) __snake_case : int = torch.jit.load(os.path.join(a_ , '''traced_model.pt''' ) , map_location=a_ ) loaded(inputs_dict['''input_ids'''].to(a_ ) , inputs_dict['''attention_mask'''].to(a_ ) ) @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __snake_case : List[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __snake_case : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __snake_case : List[Any] = model(a_ , attention_mask=a_ )[0] __snake_case : Tuple = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , a_ ) __snake_case : Optional[int] = torch.tensor( [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
24
0
"""simple docstring""" from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( a_, a_ ): '''simple docstring''' @register_to_config def __init__(self , a_ , a_ = None , a_ = None ): '''simple docstring''' super().__init__() __snake_case : Optional[Any] = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" __snake_case : Tuple = torch.zeros(lowercase_ , lowercase_ ) else: __snake_case : Any = None __snake_case : List[str] = torch.nn.Parameter(lowercase_ ) class _UpperCAmelCase ( a_ ): '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' super().__init__() self.register_modules( vqvae=lowercase_ , transformer=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , scheduler=lowercase_ , learned_classifier_free_sampling_embeddings=lowercase_ , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' __snake_case : int = len(lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else 1 # get prompt text embeddings __snake_case : int = self.tokenizer( lowercase_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) __snake_case : List[str] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __snake_case : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) __snake_case : Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length] __snake_case : Tuple = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 __snake_case : str = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=lowercase_ ) # duplicate text embeddings for each generation per prompt __snake_case : Tuple = prompt_embeds.repeat_interleave(lowercase_ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: __snake_case : Optional[Any] = self.learned_classifier_free_sampling_embeddings.embeddings __snake_case : str = negative_prompt_embeds.unsqueeze(0 ).repeat(lowercase_ , 1 , 1 ) else: __snake_case : List[Any] = [''''''] * batch_size __snake_case : Any = text_input_ids.shape[-1] __snake_case : Optional[Any] = self.tokenizer( lowercase_ , padding='''max_length''' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='''pt''' , ) __snake_case : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings __snake_case : List[Any] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=lowercase_ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __snake_case : Dict = negative_prompt_embeds.shape[1] __snake_case : List[Any] = negative_prompt_embeds.repeat(1 , lowercase_ , 1 ) __snake_case : str = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowercase_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __snake_case : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__(self , a_ , a_ = 1_00 , a_ = 5.0 , a_ = 1.0 , a_ = 1 , a_ = None , a_ = None , a_ = "pil" , a_ = True , a_ = None , a_ = 1 , ): '''simple docstring''' if isinstance(lowercase_ , lowercase_ ): __snake_case : int = 1 elif isinstance(lowercase_ , lowercase_ ): __snake_case : Optional[int] = len(lowercase_ ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(lowercase_ )}""" ) __snake_case : Any = batch_size * num_images_per_prompt __snake_case : List[Any] = guidance_scale > 1.0 __snake_case : Optional[int] = self._encode_prompt(lowercase_ , lowercase_ , lowercase_ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(lowercase_ )}.""" ) # get the initial completely masked latents unless the user supplied it __snake_case : int = (batch_size, self.transformer.num_latent_pixels) if latents is None: __snake_case : List[str] = self.transformer.num_vector_embeds - 1 __snake_case : Any = torch.full(lowercase_ , lowercase_ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( '''Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0,''' f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" ) __snake_case : Optional[Any] = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(lowercase_ , device=self.device ) __snake_case : Dict = self.scheduler.timesteps.to(self.device ) __snake_case : Any = latents for i, t in enumerate(self.progress_bar(lowercase_ ) ): # expand the sample if we are doing classifier free guidance __snake_case : Dict = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` __snake_case : Dict = self.transformer(lowercase_ , encoder_hidden_states=lowercase_ , timestep=lowercase_ ).sample if do_classifier_free_guidance: __snake_case : List[Any] = model_output.chunk(2 ) __snake_case : Tuple = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(lowercase_ , dim=1 , keepdim=lowercase_ ) __snake_case : Dict = self.truncate(lowercase_ , lowercase_ ) # remove `log(0)`'s (`-inf`s) __snake_case : Optional[Any] = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 __snake_case : List[str] = self.scheduler.step(lowercase_ , timestep=lowercase_ , sample=lowercase_ , generator=lowercase_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase_ , lowercase_ , lowercase_ ) __snake_case : List[str] = self.vqvae.config.vq_embed_dim __snake_case : Optional[Any] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) __snake_case : Optional[Any] = self.vqvae.quantize.get_codebook_entry(lowercase_ , shape=lowercase_ ) __snake_case : Dict = self.vqvae.decode(lowercase_ , force_not_quantize=lowercase_ ).sample __snake_case : List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) __snake_case : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __snake_case : Optional[Any] = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase_ ) def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : Optional[Any] = torch.sort(lowercase_ , 1 , descending=lowercase_ ) __snake_case : Any = torch.exp(lowercase_ ) __snake_case : Union[str, Any] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out __snake_case : Optional[int] = torch.full_like(keep_mask[:, 0:1, :] , lowercase_ ) __snake_case : Tuple = torch.cat((all_true, keep_mask) , dim=1 ) __snake_case : List[str] = keep_mask[:, :-1, :] __snake_case : Optional[Any] = keep_mask.gather(1 , indices.argsort(1 ) ) __snake_case : Dict = log_p_x_0.clone() __snake_case : Tuple = -torch.inf # -inf = log(0) return rv
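The truncate method at the end of the pipeline keeps only the most probable codebook entries whose cumulative probability stays below truncation_rate, and sets everything else to log(0) before the scheduler step. A minimal standalone sketch of the same idea, assuming a plain log-probability tensor of shape (batch, num_vector_embeds, num_pixels) rather than the pipeline's internals:

import torch

def truncate_log_probs(log_p_x_0: torch.Tensor, truncation_rate: float) -> torch.Tensor:
    # Sort entries by probability (descending) along the vocabulary axis.
    sorted_log_p, indices = torch.sort(log_p_x_0, 1, descending=True)
    sorted_p = torch.exp(sorted_log_p)
    keep_mask = sorted_p.cumsum(dim=1) < truncation_rate
    # Shift the mask by one so the single most probable entry is always kept.
    all_true = torch.full_like(keep_mask[:, 0:1, :], True)
    keep_mask = torch.cat((all_true, keep_mask), dim=1)[:, :-1, :]
    # Undo the sort so the mask lines up with the original ordering.
    keep_mask = keep_mask.gather(1, indices.argsort(1))
    out = log_p_x_0.clone()
    out[~keep_mask] = float("-inf")  # -inf = log(0)
    return out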
357
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase ( _snake_case : str , _snake_case : str , _snake_case : str ) ->List[Any]: """simple docstring""" def get_masked_lm_array(_snake_case : str ): __snake_case : int = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : str = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Any = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_array(_snake_case : str ): __snake_case : List[str] = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Union[str, Any] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_layer_array(_snake_case : int , _snake_case : str ): __snake_case : str = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Optional[int] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[Any] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_attention_layer_array(_snake_case : int , _snake_case : str , _snake_case : str ): __snake_case : Any = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Dict = tf.train.load_variable(_snake_case , _snake_case ) __snake_case : int = array.reshape(_snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) print(f"""Loading model based on config from {config_path}...""" ) __snake_case : Optional[Any] = BertConfig.from_json_file(_snake_case ) __snake_case : Dict = BertForMaskedLM(_snake_case ) # Layers for layer_index in range(0 , config.num_hidden_layers ): __snake_case : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention __snake_case : BertSelfAttention = layer.attention.self __snake_case : int = get_encoder_attention_layer_array( _snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape ) __snake_case : List[Any] = get_encoder_attention_layer_array( _snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape ) __snake_case : Union[str, Any] = get_encoder_attention_layer_array( _snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape ) # Self-attention Output __snake_case : BertSelfOutput = layer.attention.output __snake_case : Dict = get_encoder_attention_layer_array( _snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape ) __snake_case : str = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/gamma''' ) __snake_case : Any = 
get_encoder_layer_array(_snake_case , '''_attention_layer_norm/beta''' ) # Intermediate __snake_case : BertIntermediate = layer.intermediate __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/kernel''' ) __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/bias''' ) # Output __snake_case : BertOutput = layer.output __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_dense/kernel''' ) __snake_case : Dict = get_encoder_layer_array(_snake_case , '''_output_dense/bias''' ) __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/gamma''' ) __snake_case : Union[str, Any] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/beta''' ) # Embeddings __snake_case : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' ) __snake_case : str = get_encoder_array('''_type_embedding_layer/embeddings''' ) __snake_case : int = get_encoder_array('''_embedding_norm_layer/gamma''' ) __snake_case : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' ) # LM Head __snake_case : Optional[Any] = model.cls.predictions.transform __snake_case : Dict = get_masked_lm_array('''dense/kernel''' ) __snake_case : Union[str, Any] = get_masked_lm_array('''dense/bias''' ) __snake_case : str = get_masked_lm_array('''layer_norm/gamma''' ) __snake_case : Tuple = get_masked_lm_array('''layer_norm/beta''' ) __snake_case : Tuple = get_masked_lm_array('''embedding_table''' ) # Pooling __snake_case : Optional[Any] = BertPooler(config=_snake_case ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/kernel''' ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/bias''' ) # Export final model model.save_pretrained(_snake_case ) # Integration test - should load without any errors ;) new_model = BertForMaskedLM.from_pretrained(_snake_case ) print(new_model.eval() ) print('''Model conversion was done successfully!''' ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path.""" ) parser.add_argument( """--bert_config_file""", type=str, required=True, help="""The config json file corresponding to the BERT model. This specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", type=str, required=True, help="""Path to the output PyTorch model.""", ) args = parser.parse_args() lowercase(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
24
0
"""simple docstring""" # flake8: noqa # Lint as: python3 SCREAMING_SNAKE_CASE : Tuple = [ """VerificationMode""", """Version""", """disable_progress_bar""", """enable_progress_bar""", """is_progress_bar_enabled""", """experimental""", ] from .info_utils import VerificationMode from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled from .version import Version from .experimental import experimental
358
"""simple docstring""" import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_ , a_ = None , a_ = None , a_ = False , **a_ , ): '''simple docstring''' super().__init__(features=a_ , cache_dir=a_ , keep_in_memory=a_ , **a_ ) __snake_case : Union[str, Any] = Sql( cache_dir=a_ , features=a_ , sql=a_ , con=a_ , **a_ , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = None __snake_case : Dict = None __snake_case : Dict = None __snake_case : List[str] = None self.builder.download_and_prepare( download_config=a_ , download_mode=a_ , verification_mode=a_ , base_path=a_ , ) # Build dataset for splits __snake_case : Any = self.builder.as_dataset( split='''train''' , verification_mode=a_ , in_memory=self.keep_in_memory ) return dataset class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_ , a_ , a_ = None , a_ = None , **a_ , ): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" ) __snake_case : List[str] = dataset __snake_case : Tuple = name __snake_case : Optional[int] = con __snake_case : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __snake_case : Dict = num_proc __snake_case : Dict = to_sql_kwargs def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.to_sql_kwargs.pop('''sql''' , a_ ) __snake_case : Union[str, Any] = self.to_sql_kwargs.pop('''con''' , a_ ) __snake_case : Any = self.to_sql_kwargs.pop('''index''' , a_ ) __snake_case : Optional[Any] = self._write(index=a_ , **self.to_sql_kwargs ) return written def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case , __snake_case , __snake_case : Optional[Any] = args __snake_case : List[Any] = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs __snake_case : Dict = query_table( table=self.dataset.data , key=slice(a_ , offset + self.batch_size ) , indices=self.dataset._indices , ) __snake_case : Tuple = batch.to_pandas() __snake_case : str = df.to_sql(self.name , self.con , index=a_ , **a_ ) return num_rows or len(a_ ) def SCREAMING_SNAKE_CASE (self , a_ , **a_ ): '''simple docstring''' __snake_case : int = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: __snake_case , __snake_case : Union[str, Any] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a_ , a_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += num_rows return written
24
0
"""simple docstring""" from collections import Counter from timeit import timeit def lowercase ( _snake_case : str = "" , ) ->Dict: """simple docstring""" return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2 def lowercase ( _snake_case : str = "" ) ->int: """simple docstring""" if len(_snake_case ) == 0: return True __snake_case : List[str] = input_str.replace(''' ''' , '''''' ).lower() # character_freq_dict: Stores the frequency of every character in the input string __snake_case : dict[str, int] = {} for character in lower_case_input_str: __snake_case : List[str] = character_freq_dict.get(_snake_case , 0 ) + 1 __snake_case : Union[str, Any] = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def lowercase ( _snake_case : str = "" ) ->List[str]: """simple docstring""" print('''\nFor string = ''' , _snake_case , ''':''' ) print( '''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(_snake_case ) , '''\ttime =''' , timeit( '''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , ) print( '''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(_snake_case ) , '''\ttime =''' , timeit( '''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Tuple = input( """Enter string to determine if it can be rearranged as a palindrome or not: """ ).strip() benchmark(check_str) SCREAMING_SNAKE_CASE : str = can_string_be_rearranged_as_palindrome_counter(check_str) print(F'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
359
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[int] = { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""", } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='lxmert' lowerCamelCase__ ={} def __init__(self , a_=3_05_22 , a_=7_68 , a_=12 , a_=95_00 , a_=16_00 , a_=4_00 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=9 , a_=5 , a_=5 , a_=20_48 , a_=4 , a_=6.67 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = vocab_size __snake_case : List[str] = hidden_size __snake_case : List[Any] = num_attention_heads __snake_case : int = hidden_act __snake_case : int = intermediate_size __snake_case : Any = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : Tuple = max_position_embeddings __snake_case : List[str] = type_vocab_size __snake_case : str = initializer_range __snake_case : Tuple = layer_norm_eps __snake_case : List[Any] = num_qa_labels __snake_case : int = num_object_labels __snake_case : Optional[Any] = num_attr_labels __snake_case : Union[str, Any] = l_layers __snake_case : Optional[int] = x_layers __snake_case : Optional[int] = r_layers __snake_case : Tuple = visual_feat_dim __snake_case : Optional[int] = visual_pos_dim __snake_case : Dict = visual_loss_normalizer __snake_case : str = task_matched __snake_case : Optional[Any] = task_mask_lm __snake_case : List[str] = task_obj_predict __snake_case : Optional[Any] = task_qa __snake_case : Any = visual_obj_loss __snake_case : int = visual_attr_loss __snake_case : List[Any] = visual_feat_loss __snake_case : Optional[Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers} super().__init__(**a_ )
24
0
"""simple docstring""" from __future__ import annotations from random import choice def lowercase ( _snake_case : Optional[int] ) ->str: """simple docstring""" return choice(_a ) def lowercase ( _snake_case : list[int] , _snake_case : int ) ->Optional[Any]: """simple docstring""" __snake_case : List[str] = random_pivot(_a ) # partition based on pivot # linear time __snake_case : List[str] = [e for e in lst if e < pivot] __snake_case : Dict = [e for e in lst if e > pivot] # if we get lucky, pivot might be the element we want. # we can easily see this: # small (elements smaller than k) # + pivot (kth element) # + big (elements larger than k) if len(_a ) == k - 1: return pivot # pivot is in elements bigger than k elif len(_a ) < k - 1: return kth_number(_a , k - len(_a ) - 1 ) # pivot is in elements smaller than k else: return kth_number(_a , _a ) if __name__ == "__main__": import doctest doctest.testmod()
360
"""simple docstring""" def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" __snake_case : Tuple = len(_snake_case ) __snake_case : str = sum(_snake_case ) __snake_case : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __snake_case : Optional[Any] = True for i in range(1 , s + 1 ): __snake_case : int = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __snake_case : Union[str, Any] = dp[i][j - 1] if arr[i - 1] <= j: __snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __snake_case : List[str] = s - 2 * j break return diff
24
0
"""simple docstring""" from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent SCREAMING_SNAKE_CASE : Tuple = {"""UserAgent""": UserAgent().random} def lowercase ( _snake_case : List[Any] ) ->dict: """simple docstring""" __snake_case : str = script.contents[0] __snake_case : str = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : int = f"""https://www.instagram.com/{username}/""" __snake_case : int = self.get_json() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = requests.get(self.url , headers=snake_case__ ).text __snake_case : Optional[Any] = BeautifulSoup(snake_case__ , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__(self ): '''simple docstring''' return f"""{self.__class__.__name__}(\'{self.username}\')""" def __str__(self ): '''simple docstring''' return f"""{self.fullname} ({self.username}) is {self.biography}""" @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["username"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["full_name"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["biography"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["business_email"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["external_url"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["edge_followed_by"]["count"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["edge_follow"]["count"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["edge_owner_to_timeline_media"]["count"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["profile_pic_url_hd"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["is_verified"] @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.user_data["is_private"] def lowercase ( _snake_case : List[Any] = "github" ) ->None: """simple docstring""" import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions __snake_case : Tuple = InstagramUser(__lowerCAmelCase ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , __lowerCAmelCase ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 120_000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE : Union[str, Any] = InstagramUser("""github""") print(instagram_user) print(F'{instagram_user.number_of_posts = }') print(F'{instagram_user.number_of_followers = }') print(F'{instagram_user.number_of_followings = }') print(F'{instagram_user.email = }') print(F'{instagram_user.website = }') print(F'{instagram_user.profile_picture_url = }') print(F'{instagram_user.is_verified = }') print(F'{instagram_user.is_private = }')
361
"""simple docstring""" from collections.abc import Callable def lowercase ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) ->float: """simple docstring""" __snake_case : float = a __snake_case : float = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('''could not find root in given interval.''' ) else: __snake_case : float = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: __snake_case : List[str] = mid else: __snake_case : str = mid __snake_case : str = start + (end - start) / 2.0 return mid def lowercase ( _snake_case : float ) ->float: """simple docstring""" return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1000)) import doctest doctest.testmod()
24
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig SCREAMING_SNAKE_CASE : Optional[int] = { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''', } class _UpperCAmelCase ( a__ ): '''simple docstring''' lowerCamelCase__ ="""albert""" def __init__(self , a_=3_00_00 , a_=1_28 , a_=40_96 , a_=12 , a_=1 , a_=64 , a_=1_63_84 , a_=1 , a_="gelu_new" , a_=0 , a_=0 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=0.1 , a_="absolute" , a_=0 , a_=2 , a_=3 , **a_ , ): '''simple docstring''' super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) __snake_case : str = vocab_size __snake_case : Optional[Any] = embedding_size __snake_case : List[str] = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : Optional[Any] = num_hidden_groups __snake_case : str = num_attention_heads __snake_case : Optional[int] = inner_group_num __snake_case : List[Any] = hidden_act __snake_case : str = intermediate_size __snake_case : List[Any] = hidden_dropout_prob __snake_case : int = attention_probs_dropout_prob __snake_case : Tuple = max_position_embeddings __snake_case : Optional[int] = type_vocab_size __snake_case : str = initializer_range __snake_case : int = layer_norm_eps __snake_case : List[str] = classifier_dropout_prob __snake_case : int = position_embedding_type class _UpperCAmelCase ( a__ ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if self.task == "multiple-choice": __snake_case : str = {0: "batch", 1: "choice", 2: "sequence"} else: __snake_case : Optional[Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
362
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[str] = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : str = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
24
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Any = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class _UpperCAmelCase ( lowerCAmelCase__, lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase__ ="swin" lowerCamelCase__ ={ "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__(self , a_=2_24 , a_=4 , a_=3 , a_=96 , a_=[2, 2, 6, 2] , a_=[3, 6, 12, 24] , a_=7 , a_=4.0 , a_=True , a_=0.0 , a_=0.0 , a_=0.1 , a_="gelu" , a_=False , a_=0.02 , a_=1E-5 , a_=32 , a_=None , a_=None , **a_ , ): '''simple docstring''' super().__init__(**a__ ) __snake_case : Dict = image_size __snake_case : int = patch_size __snake_case : List[Any] = num_channels __snake_case : str = embed_dim __snake_case : Optional[int] = depths __snake_case : Optional[Any] = len(a__ ) __snake_case : Optional[int] = num_heads __snake_case : Optional[int] = window_size __snake_case : Tuple = mlp_ratio __snake_case : int = qkv_bias __snake_case : str = hidden_dropout_prob __snake_case : Tuple = attention_probs_dropout_prob __snake_case : List[str] = drop_path_rate __snake_case : int = hidden_act __snake_case : int = use_absolute_embeddings __snake_case : List[str] = layer_norm_eps __snake_case : Tuple = initializer_range __snake_case : List[Any] = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __snake_case : Optional[Any] = int(embed_dim * 2 ** (len(a__ ) - 1) ) __snake_case : Union[str, Any] = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(a__ ) + 1 )] __snake_case , __snake_case : Tuple = get_aligned_output_features_output_indices( out_features=a__ , out_indices=a__ , stage_names=self.stage_names ) class _UpperCAmelCase ( lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase__ =version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return 1E-4
363
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =['image_processor', 'tokenizer'] lowerCamelCase__ ='CLIPImageProcessor' lowerCamelCase__ =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__(self , a_=None , a_=None , **a_ ): '''simple docstring''' __snake_case : Any = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a_ , ) __snake_case : Union[str, Any] = kwargs.pop('''feature_extractor''' ) __snake_case : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a_ , a_ ) def __call__(self , a_=None , a_=None , a_=None , **a_ ): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __snake_case : Dict = self.tokenizer(a_ , return_tensors=a_ , **a_ ) if images is not None: __snake_case : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None and images is not None: __snake_case : List[str] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.batch_decode(*a_ , **a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.decode(*a_ , **a_ ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.tokenizer.model_input_names __snake_case : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
24
0
"""simple docstring""" def lowercase ( ) ->int: """simple docstring""" return 1 def lowercase ( _snake_case : int ) ->int: """simple docstring""" return 0 if x < 0 else two_pence(x - 2 ) + one_pence() def lowercase ( _snake_case : int ) ->int: """simple docstring""" return 0 if x < 0 else five_pence(x - 5 ) + two_pence(snake_case__ ) def lowercase ( _snake_case : int ) ->int: """simple docstring""" return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(snake_case__ ) def lowercase ( _snake_case : int ) ->int: """simple docstring""" return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(snake_case__ ) def lowercase ( _snake_case : int ) ->int: """simple docstring""" return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(snake_case__ ) def lowercase ( _snake_case : int ) ->int: """simple docstring""" return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(snake_case__ ) def lowercase ( _snake_case : int ) ->int: """simple docstring""" return 0 if x < 0 else two_pound(x - 200 ) + one_pound(snake_case__ ) def lowercase ( _snake_case : int = 200 ) ->int: """simple docstring""" return two_pound(snake_case__ ) if __name__ == "__main__": print(solution(int(input().strip())))
364
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE : List[Any] = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""", """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""", }, } SCREAMING_SNAKE_CASE : Tuple = { """facebook/mbart-large-en-ro""": 1024, """facebook/mbart-large-cc25""": 1024, } # fmt: off SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =VOCAB_FILES_NAMES lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ =['input_ids', 'attention_mask'] lowerCamelCase__ =MBartTokenizer lowerCamelCase__ =[] lowerCamelCase__ =[] def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token super().__init__( vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , ) __snake_case : Tuple = vocab_file __snake_case : Optional[Any] = False if not self.vocab_file else True __snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) __snake_case : Optional[int] = { lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX''' __snake_case : Any = self.convert_tokens_to_ids(self._src_lang ) __snake_case : Dict = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' __snake_case : Tuple = [self.sep_token_id] __snake_case : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __snake_case : Optional[int] = src_lang __snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ ) __snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ ) __snake_case : int = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ): '''simple docstring''' __snake_case : int = src_lang __snake_case : List[Any] = tgt_lang return super().prepare_seqaseq_batch(a_ , a_ , **a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : List[Any] = [] __snake_case : Any = [self.eos_token_id, self.cur_lang_code] __snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : Optional[Any] = [] __snake_case : Dict = [self.eos_token_id, self.cur_lang_code] __snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + 
suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(a_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return __snake_case : Optional[Any] = os.path.join( a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file , a_ ) return (out_vocab_file,)
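A hedged usage sketch of the language-code machinery above; facebook/mbart-large-en-ro is a real checkpoint, but running this downloads its files, and the text_target argument assumes a reasonably recent transformers release:

from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
# set_src_lang_special_tokens suffixes source inputs with [</s>, en_XX];
# targets tokenized via text_target get [</s>, ro_RO] instead.
batch = tok(
    "UN Chief Says There Is No Military Solution in Syria",
    text_target="Şeful ONU declară că nu există o soluţie militară în Siria",
    return_tensors="pt",
)
print(batch["input_ids"].shape, batch["labels"].shape)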
24
0
"""simple docstring""" from collections.abc import Iterable from typing import Generic, TypeVar SCREAMING_SNAKE_CASE : Dict = TypeVar("""_T""") class _UpperCAmelCase ( Generic[_T] ): '''simple docstring''' def __init__(self , a_ = None ): '''simple docstring''' __snake_case : list[_T] = list(iterable or [] ) __snake_case : list[_T] = [] def __len__(self ): '''simple docstring''' return len(self._stacka ) + len(self._stacka ) def __repr__(self ): '''simple docstring''' return f"""Queue({tuple(self._stacka[::-1] + self._stacka )})""" def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' self._stacka.append(SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self._stacka.pop __snake_case : List[Any] = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop() ) if not self._stacka: raise IndexError('''Queue is empty''' ) return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
365
"""simple docstring""" import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__) @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None if is_torch_available(): import torch from torch.utils.data import Dataset class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ): '''simple docstring''' __snake_case : Any = hans_processors[task]() __snake_case : int = os.path.join( a_ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , ) __snake_case : Tuple = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Dict = label_list[2], label_list[1] __snake_case : Any = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __snake_case : int = cached_features_file + '''.lock''' with FileLock(a_ ): if os.path.exists(a_ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __snake_case : Union[str, Any] = torch.load(a_ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __snake_case : Dict = ( processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) ) logger.info('''Training examples: %s''' , len(a_ ) ) __snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) logger.info('''Saving features into cached file %s''' , a_ ) torch.save(self.features , a_ ) def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list if is_tf_available(): import tensorflow as tf class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = 1_28 , a_=False , a_ = False , ): '''simple docstring''' __snake_case : List[Any] = hans_processors[task]() __snake_case : str = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Tuple = label_list[2], label_list[1] __snake_case : Dict = label_list __snake_case : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) __snake_case : Dict = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_00_00 == 0: 
logger.info('''Writing example %d of %d''' % (ex_index, len(a_ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __snake_case : Union[str, Any] = tf.data.Dataset.from_generator( a_ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.dataset def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return ["contradiction", "entailment", "neutral"] def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = [] for i, line in enumerate(a_ ): if i == 0: continue __snake_case : Tuple = '''%s-%s''' % (set_type, line[0]) __snake_case : Dict = line[5] __snake_case : int = line[6] __snake_case : Dict = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __snake_case : List[Any] = line[0] examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) ) return examples def lowercase ( _snake_case : List[InputExample] , _snake_case : List[str] , _snake_case : int , _snake_case : PreTrainedTokenizer , ) ->List[str]: """simple docstring""" __snake_case : Optional[int] = {label: i for i, label in enumerate(_snake_case )} __snake_case : Tuple = [] for ex_index, example in tqdm.tqdm(enumerate(_snake_case ) , desc='''convert examples to features''' ): if ex_index % 10_000 == 0: logger.info('''Writing example %d''' % (ex_index) ) __snake_case : List[Any] = tokenizer( example.text_a , example.text_b , add_special_tokens=_snake_case , max_length=_snake_case , padding='''max_length''' , truncation=_snake_case , return_overflowing_tokens=_snake_case , ) __snake_case : List[Any] = label_map[example.label] if example.label in label_map else 0 __snake_case : Union[str, Any] = int(example.pairID ) features.append(InputFeatures(**_snake_case , label=_snake_case , pairID=_snake_case ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features SCREAMING_SNAKE_CASE : Dict = { """hans""": 3, } SCREAMING_SNAKE_CASE : str = { """hans""": HansProcessor, }
24
0
"""simple docstring""" from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 class _UpperCAmelCase ( nn.Module ): '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =(16, 32, 96, 256) lowerCamelCase__ =jnp.floataa def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = nn.Conv( self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __snake_case : Union[str, Any] = [] for i in range(len(self.block_out_channels ) - 1 ): __snake_case : Optional[Any] = self.block_out_channels[i] __snake_case : Optional[int] = self.block_out_channels[i + 1] __snake_case : Optional[int] = nn.Conv( UpperCamelCase__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(UpperCamelCase__ ) __snake_case : Optional[int] = nn.Conv( UpperCamelCase__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(UpperCamelCase__ ) __snake_case : Tuple = blocks __snake_case : Tuple = nn.Conv( self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__(self , a_ ): '''simple docstring''' __snake_case : Dict = self.conv_in(UpperCamelCase__ ) __snake_case : int = nn.silu(UpperCamelCase__ ) for block in self.blocks: __snake_case : str = block(UpperCamelCase__ ) __snake_case : Optional[Any] = nn.silu(UpperCamelCase__ ) __snake_case : Optional[Any] = self.conv_out(UpperCamelCase__ ) return embedding @flax_register_to_config class _UpperCAmelCase ( nn.Module, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowerCamelCase__ =32 lowerCamelCase__ =4 lowerCamelCase__ =( 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'DownBlock2D', ) lowerCamelCase__ =False lowerCamelCase__ =(320, 640, 1280, 1280) lowerCamelCase__ =2 lowerCamelCase__ =8 lowerCamelCase__ =None lowerCamelCase__ =1280 lowerCamelCase__ =0.0 lowerCamelCase__ =False lowerCamelCase__ =jnp.floataa lowerCamelCase__ =True lowerCamelCase__ =0 lowerCamelCase__ ='rgb' lowerCamelCase__ =(16, 32, 96, 256) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = (1, self.in_channels, self.sample_size, self.sample_size) __snake_case : Any = jnp.zeros(UpperCamelCase__ , dtype=jnp.floataa ) __snake_case : Dict = jnp.ones((1,) , dtype=jnp.intaa ) __snake_case : List[str] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) __snake_case : Optional[int] = (1, 3, self.sample_size * 8, self.sample_size * 8) __snake_case : int = jnp.zeros(UpperCamelCase__ , dtype=jnp.floataa ) __snake_case : Optional[int] = jax.random.split(UpperCamelCase__ ) __snake_case : Optional[int] = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ 
)["params"] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.block_out_channels __snake_case : Optional[int] = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. __snake_case : Union[str, Any] = self.num_attention_heads or self.attention_head_dim # input __snake_case : List[Any] = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time __snake_case : Any = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) __snake_case : List[Any] = FlaxTimestepEmbedding(UpperCamelCase__ , dtype=self.dtype ) __snake_case : int = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , ) __snake_case : Any = self.only_cross_attention if isinstance(UpperCamelCase__ , UpperCamelCase__ ): __snake_case : Union[str, Any] = (only_cross_attention,) * len(self.down_block_types ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ): __snake_case : str = (num_attention_heads,) * len(self.down_block_types ) # down __snake_case : str = [] __snake_case : List[str] = [] __snake_case : Union[str, Any] = block_out_channels[0] __snake_case : Tuple = nn.Conv( UpperCamelCase__ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(UpperCamelCase__ ) for i, down_block_type in enumerate(self.down_block_types ): __snake_case : Dict = output_channel __snake_case : Union[str, Any] = block_out_channels[i] __snake_case : Tuple = i == len(UpperCamelCase__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": __snake_case : List[Any] = FlaxCrossAttnDownBlockaD( in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , ) else: __snake_case : str = FlaxDownBlockaD( in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(UpperCamelCase__ ) for _ in range(self.layers_per_block ): __snake_case : Union[str, Any] = nn.Conv( UpperCamelCase__ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(UpperCamelCase__ ) if not is_final_block: __snake_case : str = nn.Conv( UpperCamelCase__ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(UpperCamelCase__ ) 
__snake_case : List[Any] = down_blocks __snake_case : List[Any] = controlnet_down_blocks # mid __snake_case : Optional[int] = block_out_channels[-1] __snake_case : Optional[Any] = FlaxUNetMidBlockaDCrossAttn( in_channels=UpperCamelCase__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , ) __snake_case : List[Any] = nn.Conv( UpperCamelCase__ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__(self , a_ , a_ , a_ , a_ , a_ = 1.0 , a_ = True , a_ = False , ): '''simple docstring''' __snake_case : Optional[Any] = self.controlnet_conditioning_channel_order if channel_order == "bgr": __snake_case : Dict = jnp.flip(UpperCamelCase__ , axis=1 ) # 1. time if not isinstance(UpperCamelCase__ , jnp.ndarray ): __snake_case : str = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(UpperCamelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0: __snake_case : Any = timesteps.astype(dtype=jnp.floataa ) __snake_case : Optional[Any] = jnp.expand_dims(UpperCamelCase__ , 0 ) __snake_case : int = self.time_proj(UpperCamelCase__ ) __snake_case : Tuple = self.time_embedding(UpperCamelCase__ ) # 2. pre-process __snake_case : Dict = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) ) __snake_case : Optional[int] = self.conv_in(UpperCamelCase__ ) __snake_case : str = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) ) __snake_case : Optional[int] = self.controlnet_cond_embedding(UpperCamelCase__ ) sample += controlnet_cond # 3. down __snake_case : Optional[Any] = (sample,) for down_block in self.down_blocks: if isinstance(UpperCamelCase__ , UpperCamelCase__ ): __snake_case : Dict = down_block(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , deterministic=not train ) else: __snake_case : Dict = down_block(UpperCamelCase__ , UpperCamelCase__ , deterministic=not train ) down_block_res_samples += res_samples # 4. mid __snake_case : List[str] = self.mid_block(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , deterministic=not train ) # 5. contronet blocks __snake_case : Tuple = () for down_block_res_sample, controlnet_block in zip(UpperCamelCase__ , self.controlnet_down_blocks ): __snake_case : Any = controlnet_block(UpperCamelCase__ ) controlnet_down_block_res_samples += (down_block_res_sample,) __snake_case : Optional[Any] = controlnet_down_block_res_samples __snake_case : int = self.controlnet_mid_block(UpperCamelCase__ ) # 6. scaling __snake_case : Optional[int] = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=UpperCamelCase__ , mid_block_res_sample=UpperCamelCase__ )
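# Layout note (added): Flax convolutions expect NHWC, so __call__ above transposes both
# the noisy sample and the conditioning image from PyTorch-style NCHW with
# jnp.transpose(x, (0, 2, 3, 1)) before computing `sample += controlnet_cond`. The
# conditioning image is expected at 8x the latent resolution, matching the
# (1, 3, sample_size * 8, sample_size * 8) dummy tensor built in init_weights.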
366
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[str] = { """tanreinama/GPTSAN-2.8B-spout_is_uniform""": ( """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json""" ), } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='gptsan-japanese' lowerCamelCase__ =[ 'past_key_values', ] lowerCamelCase__ ={ 'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__(self , a_=3_60_00 , a_=12_80 , a_=10_24 , a_=81_92 , a_=40_96 , a_=1_28 , a_=10 , a_=0 , a_=16 , a_=16 , a_=1_28 , a_=0.0 , a_=1E-5 , a_=False , a_=0.0 , a_="float32" , a_=False , a_=False , a_=False , a_=0.002 , a_=False , a_=True , a_=3_59_98 , a_=3_59_95 , a_=3_59_99 , **a_ , ): '''simple docstring''' __snake_case : Any = vocab_size __snake_case : str = max_position_embeddings __snake_case : Any = d_model __snake_case : List[str] = d_ff __snake_case : Dict = d_ext __snake_case : Optional[Any] = d_spout __snake_case : int = num_switch_layers __snake_case : List[Any] = num_ext_layers __snake_case : Any = num_switch_layers + num_ext_layers __snake_case : Optional[int] = num_heads __snake_case : Tuple = num_experts __snake_case : List[Any] = expert_capacity __snake_case : Dict = dropout_rate __snake_case : Optional[Any] = layer_norm_epsilon __snake_case : Dict = router_bias __snake_case : str = router_jitter_noise __snake_case : List[str] = router_dtype __snake_case : Union[str, Any] = router_ignore_padding_tokens __snake_case : List[str] = output_hidden_states __snake_case : Optional[Any] = output_attentions __snake_case : Any = initializer_factor __snake_case : int = output_router_logits __snake_case : Union[str, Any] = use_cache super().__init__( separator_token_id=a_ , pad_token_id=a_ , eos_token_id=a_ , **a_ , )
24
0
"""simple docstring""" from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class _UpperCAmelCase : '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' raise NotImplementedError() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() class _UpperCAmelCase ( _a ): '''simple docstring''' def __init__(self , a_ , a_ = False , **a_ ): '''simple docstring''' __snake_case : Dict = tokenizer __snake_case : Any = skip_prompt __snake_case : Optional[int] = decode_kwargs # variables used in the streaming process __snake_case : Dict = [] __snake_case : Optional[int] = 0 __snake_case : Union[str, Any] = True def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError('''TextStreamer only supports batch size 1''' ) elif len(value.shape ) > 1: __snake_case : List[Any] = value[0] if self.skip_prompt and self.next_tokens_are_prompt: __snake_case : Dict = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) __snake_case : List[Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith('''\n''' ): __snake_case : Any = text[self.print_len :] __snake_case : Any = [] __snake_case : Dict = 0 # If the last token is a CJK character, we print the characters. elif len(a_ ) > 0 and self._is_chinese_char(ord(text[-1] ) ): __snake_case : List[Any] = text[self.print_len :] self.print_len += len(a_ ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) 
else: __snake_case : Dict = text[self.print_len : text.rfind(''' ''' ) + 1] self.print_len += len(a_ ) self.on_finalized_text(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if len(self.token_cache ) > 0: __snake_case : List[Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) __snake_case : Optional[int] = text[self.print_len :] __snake_case : Union[str, Any] = [] __snake_case : int = 0 else: __snake_case : Tuple = '' __snake_case : Dict = True self.on_finalized_text(a_ , stream_end=a_ ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = False ): '''simple docstring''' print(a_ , flush=a_ , end='''''' if not stream_end else None ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False class _UpperCAmelCase ( _a ): '''simple docstring''' def __init__(self , a_ , a_ = False , a_ = None , **a_ ): '''simple docstring''' super().__init__(a_ , a_ , **a_ ) __snake_case : List[str] = Queue() __snake_case : Union[str, Any] = None __snake_case : List[str] = timeout def SCREAMING_SNAKE_CASE (self , a_ , a_ = False ): '''simple docstring''' self.text_queue.put(a_ , timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal , timeout=self.timeout ) def __iter__(self ): '''simple docstring''' return self def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
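# Typical upstream usage (added; the classes above correspond to transformers'
# TextStreamer and TextIteratorStreamer):
# from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
# tokenizer = AutoTokenizer.from_pretrained("gpt2")
# model = AutoModelForCausalLM.from_pretrained("gpt2")
# inputs = tokenizer("Hello,", return_tensors="pt")
# model.generate(**inputs, streamer=TextStreamer(tokenizer), max_new_tokens=20)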
367
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """adapter_layer""": """encoder.layers.*.adapter_layer""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", """pooling_layer.linear""": """projector""", """pooling_layer.projection""": """classifier""", } SCREAMING_SNAKE_CASE : int = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """projector""", """classifier""", ] def lowercase ( _snake_case : Optional[int] ) ->int: """simple docstring""" __snake_case : int = {} with open(_snake_case , '''r''' ) as file: for line_number, line in enumerate(_snake_case ): __snake_case : Union[str, Any] = line.strip() if line: __snake_case : str = line.split() __snake_case : Union[str, Any] = line_number __snake_case : Dict = words[0] __snake_case : str = value return result def lowercase ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Any , _snake_case : List[str] ) ->List[str]: """simple docstring""" for attribute in key.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : str = '''param''' if weight_type is not None and weight_type != "param": __snake_case : Union[str, Any] = getattr(_snake_case , _snake_case ).shape elif weight_type is not None and weight_type == "param": __snake_case : Optional[Any] = hf_pointer for attribute in hf_param_name.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : List[str] = shape_pointer.shape # let's reduce dimension __snake_case : int = value[0] else: __snake_case : int = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __snake_case : List[Any] = value elif weight_type == "weight_g": __snake_case : Tuple = value elif weight_type == "weight_v": __snake_case : str = value elif weight_type == "bias": __snake_case : str = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __snake_case : List[Any] = getattr(_snake_case , _snake_case ) __snake_case : int = value else: __snake_case : List[Any] = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : int ) ->int: """simple docstring""" __snake_case : Optional[Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : Dict = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : List[str] = '''param''' if weight_type is not None and weight_type != "param": __snake_case : str = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __snake_case : Tuple = '''.'''.join([key, hf_param_name] ) else: __snake_case : Optional[int] = key __snake_case : List[Any] = value if '''lm_head''' in full_key else value[0] SCREAMING_SNAKE_CASE : Tuple = { """W_a""": """linear_1.weight""", """W_b""": """linear_2.weight""", """b_a""": """linear_1.bias""", """b_b""": """linear_2.bias""", """ln_W""": """norm.weight""", """ln_b""": """norm.bias""", } def lowercase ( _snake_case : str , _snake_case : List[Any] , _snake_case : Tuple=None , _snake_case : int=None ) ->Dict: """simple docstring""" __snake_case : Tuple = False for key, mapped_key in MAPPING.items(): __snake_case : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __snake_case : int = True if "*" in mapped_key: __snake_case : List[Any] = name.split(_snake_case )[0].split('''.''' )[-2] __snake_case : Tuple = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: __snake_case : Union[str, Any] = '''weight_g''' elif "weight_v" in name: __snake_case : List[str] = '''weight_v''' elif "bias" in name: __snake_case : Any = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __snake_case : List[Any] = '''weight''' else: __snake_case : Union[str, Any] = None if hf_dict is not None: rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) else: set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) return is_used return is_used def lowercase ( _snake_case : str , _snake_case : Dict , _snake_case : List[str] ) ->Any: """simple docstring""" __snake_case : Union[str, Any] = [] __snake_case : Union[str, Any] = fairseq_model.state_dict() __snake_case : str = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __snake_case : str = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) __snake_case : Union[str, Any] = True else: __snake_case : Optional[Any] = load_wavaveca_layer(_snake_case , _snake_case , _snake_case ) if not is_used: unused_weights.append(_snake_case ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowercase ( _snake_case : Any , 
_snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : List[str] ) ->Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = full_name.split('''conv_layers.''' )[-1] __snake_case : str = name.split('''.''' ) __snake_case : Optional[int] = int(items[0] ) __snake_case : Any = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __snake_case : int = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __snake_case : List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_snake_case ) @torch.no_grad() def lowercase ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any=None , _snake_case : str=None , _snake_case : List[Any]=True , _snake_case : int=False ) ->Dict: """simple docstring""" if config_path is not None: __snake_case : Optional[Any] = WavaVecaConfig.from_pretrained(_snake_case ) else: __snake_case : Tuple = WavaVecaConfig() if is_seq_class: __snake_case : Optional[int] = read_txt_into_dict(_snake_case ) __snake_case : List[Any] = idalabel __snake_case : int = WavaVecaForSequenceClassification(_snake_case ) __snake_case : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) feature_extractor.save_pretrained(_snake_case ) elif is_finetuned: if dict_path: __snake_case : int = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : Tuple = target_dict.pad_index __snake_case : int = target_dict.bos_index __snake_case : Tuple = target_dict.eos_index __snake_case : Optional[Any] = len(target_dict.symbols ) __snake_case : Any = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) __snake_case : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched __snake_case : Dict = 0 __snake_case : 
List[Any] = 1 with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(_snake_case , _snake_case ) __snake_case : List[Any] = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , ) __snake_case : Tuple = True if config.feat_extract_norm == '''layer''' else False __snake_case : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) __snake_case : Tuple = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) __snake_case : Optional[int] = WavaVecaForCTC(_snake_case ) else: __snake_case : Tuple = WavaVecaForPreTraining(_snake_case ) if is_finetuned or is_seq_class: __snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __snake_case : Dict = argparse.Namespace(task='''audio_pretraining''' ) __snake_case : Optional[int] = fairseq.tasks.setup_task(_snake_case ) __snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case ) __snake_case : int = model[0].eval() recursively_load_weights(_snake_case , _snake_case , not is_finetuned ) hf_wavavec.save_pretrained(_snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) parser.add_argument( """--is_seq_class""", action="""store_true""", help="""Whether the model to convert is a fine-tuned sequence classification model or not""", ) SCREAMING_SNAKE_CASE : Any = parser.parse_args() SCREAMING_SNAKE_CASE : Tuple = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
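# Example invocation (added; the script file name and all paths are placeholders, but
# the flags match the argparse definitions above):
# python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#   --checkpoint_path ./wav2vec_small.pt \
#   --pytorch_dump_folder_path ./wav2vec2-base \
#   --not_finetuned
# Fine-tuned CTC checkpoints additionally need --dict_path pointing at fairseq's
# dict.ltr.txt, and sequence-classification checkpoints use --is_seq_class instead.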
24
0
"""simple docstring""" def lowercase ( _snake_case : str ) ->List[Any]: """simple docstring""" return " ".join( ''''''.join(word[::-1] ) if len(__UpperCamelCase ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words("""Hey wollef sroirraw"""))
368
"""simple docstring""" from ..utils import DummyObject, requires_backends class _UpperCAmelCase ( metaclass=__snake_case ): '''simple docstring''' lowerCamelCase__ =['transformers', 'torch', 'note_seq'] def __init__(self , *a_ , **a_ ): '''simple docstring''' requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
24
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) SCREAMING_SNAKE_CASE : Tuple = { 'configuration_clip': [ 'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CLIPConfig', 'CLIPOnnxConfig', 'CLIPTextConfig', 'CLIPVisionConfig', ], 'processing_clip': ['CLIPProcessor'], 'tokenization_clip': ['CLIPTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : List[Any] = ['CLIPTokenizerFast'] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Optional[Any] = ['CLIPFeatureExtractor'] SCREAMING_SNAKE_CASE : Any = ['CLIPImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : List[Any] = [ 'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'CLIPModel', 'CLIPPreTrainedModel', 'CLIPTextModel', 'CLIPTextModelWithProjection', 'CLIPVisionModel', 'CLIPVisionModelWithProjection', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : int = [ 'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFCLIPModel', 'TFCLIPPreTrainedModel', 'TFCLIPTextModel', 'TFCLIPVisionModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Tuple = [ 'FlaxCLIPModel', 'FlaxCLIPPreTrainedModel', 'FlaxCLIPTextModel', 'FlaxCLIPTextPreTrainedModel', 'FlaxCLIPVisionModel', 'FlaxCLIPVisionPreTrainedModel', ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
369
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , ): '''simple docstring''' __snake_case : List[Any] = size if size is not None else {'''shortest_edge''': 20} __snake_case : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __snake_case : Tuple = parent __snake_case : Tuple = batch_size __snake_case : Tuple = num_channels __snake_case : List[str] = image_size __snake_case : Optional[Any] = min_resolution __snake_case : List[Any] = max_resolution __snake_case : List[Any] = do_resize __snake_case : Dict = size __snake_case : Dict = do_center_crop __snake_case : Dict = crop_size __snake_case : str = do_flip_channel_order def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class _UpperCAmelCase ( __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = MobileViTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , '''do_resize''' ) ) self.assertTrue(hasattr(a_ , '''size''' ) ) self.assertTrue(hasattr(a_ , '''do_center_crop''' ) ) self.assertTrue(hasattr(a_ , '''center_crop''' ) ) self.assertTrue(hasattr(a_ , '''do_flip_channel_order''' ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], 
self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : str = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input __snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Union[str, Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input __snake_case : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
24
0
"""simple docstring""" def lowercase ( _snake_case : int , _snake_case : int ) ->List[Any]: """simple docstring""" while second != 0: __snake_case : str = first & second first ^= second __snake_case : List[str] = c << 1 return first if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE : int = int(input("""Enter the first number: """).strip()) SCREAMING_SNAKE_CASE : Dict = int(input("""Enter the second number: """).strip()) print(F'{add(first, second) = }')
370
"""simple docstring""" import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def lowercase ( ) ->Optional[int]: """simple docstring""" __snake_case : int = torch.nn.Linear(2 , 4 ) __snake_case : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 ) __snake_case : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 ) __snake_case : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) __snake_case : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def lowercase ( _snake_case : str ) ->Optional[Any]: """simple docstring""" return (model.weight.abs().sum() + model.bias.abs().sum()).item() def lowercase ( _snake_case : Union[str, Any] ) ->Tuple: """simple docstring""" __snake_case : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(_snake_case ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(a_ ): __snake_case : Any = Accelerator(cpu=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case : Optional[int] = GradientState() assert state.num_steps == 1 __snake_case : str = 4 assert state.num_steps == 4 assert state.sync_gradients is True __snake_case : List[Any] = False assert state.sync_gradients is False GradientState._reset_state() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*a_ , **a_ ): pass with patch('''torch.cuda.set_device''' , a_ ), 
patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ): __snake_case : List[Any] = Accelerator() self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : Any = get_signature(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # make sure loaded weights match accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : List[Any] = get_signature(a_ ) # saving hook def save_config(a_ , a_ , a_ ): __snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__} with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f: json.dump(a_ , a_ ) # loading hook def load_config(a_ , a_ ): with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f: __snake_case : Any = json.load(a_ ) __snake_case : List[str] = config['''class_name'''] __snake_case : str = accelerator.register_save_state_pre_hook(a_ ) __snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Any = '''random''' # make sure loaded weights match with hooks accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks removed load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Union[str, Any] = '''random''' # make sure loaded weights match with hooks removed accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components() __snake_case : Union[str, Any] = None # This should work __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertTrue(dummy_obj is None ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() __snake_case : Optional[int] = [1, 2, 3] # This should work __snake_case , __snake_case , 
__snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map={'''''': 0} , ) __snake_case : Optional[Any] = Accelerator() # This should work __snake_case : Any = accelerator.prepare(a_ ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Any = Accelerator() with init_empty_weights(): __snake_case : List[str] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : Union[str, Any] = infer_auto_device_map(a_ ) __snake_case : str = '''cpu''' __snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ ) # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Dict = accelerator.prepare(a_ ) @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU} with init_empty_weights(): __snake_case : Any = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : List[Any] = infer_auto_device_map(a_ ) __snake_case : Dict = 1 __snake_case : str = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Any = Accelerator() # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Tuple = accelerator.prepare(a_ ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM with init_empty_weights(): __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) __snake_case : Tuple = infer_auto_device_map(a_ ) __snake_case : Tuple = 1 __snake_case : List[Any] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Tuple = Accelerator() # This should work __snake_case : Dict = accelerator.prepare(a_ ) @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = torch.nn.Linear(10 , 10 ) __snake_case 
: List[str] = torch.optim.SGD(model.parameters() , lr=0.01 ) __snake_case : Optional[Any] = Accelerator(cpu=a_ ) __snake_case : str = accelerator.prepare(a_ )
24
0
"""simple docstring""" import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def lowercase ( _snake_case : int , _snake_case : Tuple , _snake_case : Dict ) ->Any: """simple docstring""" __snake_case : Tuple = 1.5 __snake_case : Optional[Any] = int(factor * num_class_images ) __snake_case : str = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__a , aesthetic_weight=0.1 ) os.makedirs(f"""{class_data_dir}/images""" , exist_ok=__a ) if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images: return while True: __snake_case : Dict = client.query(text=__a ) if len(__a ) >= factor * num_class_images or num_images > 1e4: break else: __snake_case : Optional[Any] = int(factor * num_images ) __snake_case : Optional[Any] = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__a , aesthetic_weight=0.1 , ) __snake_case : List[str] = 0 __snake_case : Tuple = 0 __snake_case : Any = tqdm(desc='''downloading real regularization images''' , total=__a ) with open(f"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(f"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open( f"""{class_data_dir}/images.txt""" , '''w''' ) as fa: while total < num_class_images: __snake_case : List[Any] = class_images[count] count += 1 try: __snake_case : Union[str, Any] = requests.get(images['''url'''] ) if img.status_code == 200: __snake_case : int = Image.open(BytesIO(img.content ) ) with open(f"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f: f.write(img.content ) fa.write(images['''caption'''] + '''\n''' ) fa.write(images['''url'''] + '''\n''' ) fa.write(f"""{class_data_dir}/images/{total}.jpg""" + '''\n''' ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def lowercase ( ) ->str: """simple docstring""" __snake_case : List[str] = argparse.ArgumentParser('''''' , add_help=__a ) parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=__a , type=__a ) parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=__a , type=__a ) parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=__a ) return parser.parse_args() if __name__ == "__main__": SCREAMING_SNAKE_CASE : Any = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
371
"""simple docstring""" def lowercase ( _snake_case : int ) ->str: """simple docstring""" if number > 0: raise ValueError('''input must be a negative integer''' ) __snake_case : Any = len(bin(_snake_case )[3:] ) __snake_case : List[Any] = bin(abs(_snake_case ) - (1 << binary_number_length) )[3:] __snake_case : Dict = ( ( '''1''' + '''0''' * (binary_number_length - len(_snake_case )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
24
0
"""simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"], "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"], } def lowercase ( _snake_case : dict , _snake_case : Any , _snake_case : Union[str, Any] ) ->Tuple: """simple docstring""" __snake_case : List[str] = set() # keep track of all the paths to be checked __snake_case : Optional[int] = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue __snake_case : Optional[int] = queue.pop(0 ) # get the last node from the path __snake_case : Dict = path[-1] if node not in explored: __snake_case : List[Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: __snake_case : int = list(A__ ) new_path.append(A__ ) queue.append(A__ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(A__ ) # in case there's no path between the 2 nodes return [] def lowercase ( _snake_case : dict , _snake_case : Tuple , _snake_case : Any ) ->Union[str, Any]: """simple docstring""" if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 __snake_case : Tuple = [start] __snake_case : str = set(A__ ) # Keep tab on distances from `start` node. __snake_case : Optional[int] = {start: 0, target: -1} while queue: __snake_case : str = queue.pop(0 ) if node == target: __snake_case : Optional[Any] = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(A__ ) queue.append(A__ ) __snake_case : Union[str, Any] = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
350
"""simple docstring""" def lowercase ( ) ->int: """simple docstring""" return [ a * b * (1_000 - a - b) for a in range(1 , 999 ) for b in range(_snake_case , 999 ) if (a * a + b * b == (1_000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'{solution() = }')
24
0
"""simple docstring""" import os import unittest from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _UpperCAmelCase ( __lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =PhobertTokenizer lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __snake_case : Optional[int] = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@'''] __snake_case : Tuple = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) ) __snake_case : int = ['''#version: 0.2''', '''l à</w>'''] __snake_case : List[Any] = {'''unk_token''': '''<unk>'''} __snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: for token in vocab_tokens: fp.write(f"""{token} {vocab_tokens[token]}\n""" ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(snake_case_ ) ) def SCREAMING_SNAKE_CASE (self , **a_ ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return PhobertTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = '''Tôi là VinAI Research''' __snake_case : int = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>''' return input_text, output_text def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __snake_case : Any = '''Tôi là VinAI Research''' __snake_case : Union[str, Any] = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split() __snake_case : List[str] = tokenizer.tokenize(snake_case_ ) print(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) __snake_case : str = tokens + [tokenizer.unk_token] __snake_case : str = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
351
"""simple docstring""" def lowercase ( _snake_case : int = 100 ) ->int: """simple docstring""" __snake_case : str = n * (n + 1) * (2 * n + 1) / 6 __snake_case : Dict = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F'{solution() = }')
24
0
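A quick brute-force cross-check of the closed-form Euler 6 solution above; solution_brute_force is an illustrative name, not part of the original file:

def solution_brute_force(n: int = 100) -> int:
    # O(n) version: compute both quantities directly instead of via formulas.
    numbers = range(1, n + 1)
    return sum(numbers) ** 2 - sum(i * i for i in numbers)


assert solution_brute_force(10) == 2640  # worked value from the problem statement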
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) __snake_case : List[str] = get_activation('''gelu''' ) self.assertTrue(torch.allclose(gelu_python(a_ ) , torch_builtin(a_ ) ) ) self.assertFalse(torch.allclose(gelu_python(a_ ) , gelu_new(a_ ) ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) __snake_case : List[Any] = get_activation('''gelu''' ) __snake_case : List[str] = get_activation('''gelu_10''' ) __snake_case : Any = torch_builtin(a_ ) __snake_case : int = geluaa(a_ ) __snake_case : Union[str, Any] = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(a_ ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' get_activation('''gelu''' ) get_activation('''gelu_10''' ) get_activation('''gelu_fast''' ) get_activation('''gelu_new''' ) get_activation('''gelu_python''' ) get_activation('''gelu_pytorch_tanh''' ) get_activation('''linear''' ) get_activation('''mish''' ) get_activation('''quick_gelu''' ) get_activation('''relu''' ) get_activation('''sigmoid''' ) get_activation('''silu''' ) get_activation('''swish''' ) get_activation('''tanh''' ) with self.assertRaises(a_ ): get_activation('''bogus''' ) with self.assertRaises(a_ ): get_activation(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = get_activation('''gelu''' ) __snake_case : Dict = 1 __snake_case : int = get_activation('''gelu''' ) self.assertEqual(acta.a , 1 ) with self.assertRaises(a_ ): __snake_case : Optional[int] = acta.a
352
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__) @dataclass class _UpperCAmelCase ( datasets.BuilderConfig ): '''simple docstring''' lowerCamelCase__ =10000 lowerCamelCase__ =None lowerCamelCase__ =None class _UpperCAmelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' lowerCamelCase__ =ParquetConfig def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) __snake_case : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(a_ , (str, list, tuple) ): __snake_case : Union[str, Any] = data_files if isinstance(a_ , a_ ): __snake_case : Union[str, Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : List[Any] = [dl_manager.iter_files(a_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __snake_case : int = [] for split_name, files in data_files.items(): if isinstance(a_ , a_ ): __snake_case : List[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : int = [dl_manager.iter_files(a_ ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(a_ ): with open(a_ , '''rb''' ) as f: __snake_case : Any = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) ) break splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) ) return splits def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __snake_case : List[Any] = table_cast(a_ , self.info.features.arrow_schema ) return pa_table def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ): with open(a_ , '''rb''' ) as f: __snake_case : int = pq.ParquetFile(a_ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __snake_case : Dict = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"""{file_idx}_{batch_idx}""", self._cast_table(a_ ) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(a_ )}: {e}""" ) raise
24
0
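This builder is what backs load_dataset("parquet", ...). A minimal usage sketch; the file path is a placeholder, any local parquet file works:

from datasets import load_dataset

ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})
print(ds["train"].features)  # features inferred from the arrow schema, as in _split_generators above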
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = inspect.getfile(accelerate.test_utils ) __snake_case : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] ) __snake_case : Optional[int] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] ) __snake_case : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] ) @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' print(f"""Found {torch.cuda.device_count()} devices.""" ) __snake_case : Tuple = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(a__ , env=os.environ.copy() ) @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' print(f"""Found {torch.cuda.device_count()} devices.""" ) __snake_case : Tuple = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path] print(f"""Command: {cmd}""" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(a__ , env=os.environ.copy() ) @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(a__ , env=os.environ.copy() ) @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" ) __snake_case : str = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ): execute_subprocess_async(a__ , env=os.environ.copy() ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[int] = Accelerator() SCREAMING_SNAKE_CASE : Any = (accelerator.state.process_index + 2, 10) SCREAMING_SNAKE_CASE : Tuple = torch.randint(0, 10, shape).to(accelerator.device) SCREAMING_SNAKE_CASE : Any = "" SCREAMING_SNAKE_CASE : List[str] = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." SCREAMING_SNAKE_CASE : Any = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." 
if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
353
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): __snake_case : Dict = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = '''sshleifer/tiny-gpt2''' __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = '''sgugger/tiny-distilbert-classification''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , ) __snake_case : Optional[Any] = TensorFlowBenchmark(a_ ) __snake_case : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Any = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Union[str, Any] = AutoConfig.from_pretrained(a_ ) __snake_case : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = '''sshleifer/tiny-gpt2''' __snake_case : Optional[Any] = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Dict = TensorFlowBenchmark(a_ , [config] ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = 
TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : int = TensorFlowBenchmark(a_ ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Dict = AutoConfig.from_pretrained(a_ ) __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''patrickvonplaten/t5-tiny-random''' __snake_case : Tuple = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , configs=[config] ) __snake_case : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(a_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(a_ , '''env.csv''' ) , multi_process=a_ , ) __snake_case : Union[str, Any] = TensorFlowBenchmark(a_ ) benchmark.run() self.assertTrue(Path(os.path.join(a_ , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''env.csv''' ) ).exists() ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(a_ ): self.assertTrue(hasattr(a_ , '''sequential''' ) ) self.assertTrue(hasattr(a_ , '''cumulative''' ) ) self.assertTrue(hasattr(a_ , '''current''' ) ) self.assertTrue(hasattr(a_ , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , '''log.txt''' ) , log_print=a_ , 
trace_memory_line_by_line=a_ , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ ) __snake_case : Optional[int] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(a_ , '''log.txt''' ) ).exists() )
24
0
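The pad_across_processes checks above rely on every rank's tensor being zero-padded along dim 0 to the largest size across ranks. A single-process sketch of that semantics, simulating two ranks with two local tensors (illustrative only, not the accelerate implementation):

import torch

tensors = [torch.ones(3, 4), torch.ones(5, 4)]  # pretend these live on two ranks
max_len = max(t.shape[0] for t in tensors)
# right-pad with zeros up to max_len; pad_first=True would pad on the left instead
padded = [torch.cat([t, t.new_zeros(max_len - t.shape[0], 4)]) for t in tensors]
assert all(t.shape == (5, 4) for t in padded)
assert torch.all(padded[0][3:] == 0)  # padding was done with the right value (0)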
"""simple docstring""" import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class _UpperCAmelCase ( unittest.TestCase ): lowerCamelCase__ =MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING lowerCamelCase__ =TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[str] = AudioClassificationPipeline(model=UpperCamelCase_ , feature_extractor=UpperCamelCase_ ) # test with a raw waveform __snake_case : Any = np.zeros((3_40_00,) ) __snake_case : Dict = np.zeros((1_40_00,) ) return audio_classifier, [audioa, audio] def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case , __snake_case : Optional[int] = examples __snake_case : List[Any] = audio_classifier(UpperCamelCase_ ) # by default a model is initialized with num_labels=2 self.assertEqual( UpperCamelCase_ , [ {'''score''': ANY(UpperCamelCase_ ), '''label''': ANY(UpperCamelCase_ )}, {'''score''': ANY(UpperCamelCase_ ), '''label''': ANY(UpperCamelCase_ )}, ] , ) __snake_case : Optional[Any] = audio_classifier(UpperCamelCase_ , top_k=1 ) self.assertEqual( UpperCamelCase_ , [ {'''score''': ANY(UpperCamelCase_ ), '''label''': ANY(UpperCamelCase_ )}, ] , ) self.run_torchaudio(UpperCamelCase_ ) @require_torchaudio def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' import datasets # test with a local file __snake_case : Union[str, Any] = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) __snake_case : List[Any] = dataset[0]['''audio''']['''array'''] __snake_case : Union[str, Any] = audio_classifier(UpperCamelCase_ ) self.assertEqual( UpperCamelCase_ , [ {'''score''': ANY(UpperCamelCase_ ), '''label''': ANY(UpperCamelCase_ )}, {'''score''': ANY(UpperCamelCase_ ), '''label''': ANY(UpperCamelCase_ )}, ] , ) @require_torch def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''anton-l/wav2vec2-random-tiny-classifier''' __snake_case : Optional[Any] = pipeline('''audio-classification''' , model=UpperCamelCase_ ) __snake_case : Optional[int] = np.ones((80_00,) ) __snake_case : Tuple = audio_classifier(UpperCamelCase_ , top_k=4 ) __snake_case : Union[str, Any] = [ {'''score''': 0.0842, '''label''': '''no'''}, {'''score''': 0.0838, '''label''': '''up'''}, {'''score''': 0.0837, '''label''': '''go'''}, {'''score''': 0.0834, '''label''': '''right'''}, ] __snake_case : List[str] = [ {'''score''': 0.0845, '''label''': '''stop'''}, {'''score''': 0.0844, '''label''': '''on'''}, {'''score''': 0.0841, '''label''': '''right'''}, {'''score''': 0.0834, '''label''': '''left'''}, ] self.assertIn(nested_simplify(UpperCamelCase_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) __snake_case : List[Any] = {'''array''': np.ones((80_00,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate} __snake_case : Union[str, Any] = audio_classifier(UpperCamelCase_ , top_k=4 ) self.assertIn(nested_simplify(UpperCamelCase_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) @require_torch @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' import datasets __snake_case : List[str] = 
'''superb/wav2vec2-base-superb-ks''' __snake_case : Union[str, Any] = pipeline('''audio-classification''' , model=UpperCamelCase_ ) __snake_case : Optional[Any] = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' ) __snake_case : Tuple = np.array(dataset[3]['''speech'''] , dtype=np.floataa ) __snake_case : str = audio_classifier(UpperCamelCase_ , top_k=4 ) self.assertEqual( nested_simplify(UpperCamelCase_ , decimals=3 ) , [ {'''score''': 0.981, '''label''': '''go'''}, {'''score''': 0.007, '''label''': '''up'''}, {'''score''': 0.006, '''label''': '''_unknown_'''}, {'''score''': 0.001, '''label''': '''down'''}, ] , ) @require_tf @unittest.skip('''Audio classification is not implemented for TF''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass
354
"""simple docstring""" import logging import os import threading import time try: import warnings except ImportError: SCREAMING_SNAKE_CASE : Tuple = None try: import msvcrt except ImportError: SCREAMING_SNAKE_CASE : List[str] = None try: import fcntl except ImportError: SCREAMING_SNAKE_CASE : Tuple = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: SCREAMING_SNAKE_CASE : List[str] = OSError # Data # ------------------------------------------------ SCREAMING_SNAKE_CASE : List[Any] = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] SCREAMING_SNAKE_CASE : List[Any] = """3.0.12""" SCREAMING_SNAKE_CASE : int = None def lowercase ( ) ->str: """simple docstring""" global _logger __snake_case : Union[str, Any] = _logger or logging.getLogger(__name__ ) return _logger class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[int] = lock_file return None def __str__(self ): '''simple docstring''' __snake_case : Tuple = f"""The file lock '{self.lock_file}' could not be acquired.""" return temp class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[Any] = lock return None def __enter__(self ): '''simple docstring''' return self.lock def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.lock.release() return None class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : List[Any] = max_filename_length if max_filename_length is not None else 2_55 # Hash the filename if it's too long __snake_case : Dict = self.hash_filename_if_too_long(a_ , a_ ) # The path to the lock file. __snake_case : str = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __snake_case : Dict = None # The default timeout value. __snake_case : List[Any] = timeout # We use this lock primarily for the lock counter. __snake_case : Tuple = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __snake_case : Optional[Any] = 0 return None @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._timeout @timeout.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Dict = float(a_ ) return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file_fd is not None def SCREAMING_SNAKE_CASE (self , a_=None , a_=0.05 ): '''simple docstring''' if timeout is None: __snake_case : List[str] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __snake_case : Optional[int] = id(self ) __snake_case : str = self._lock_file __snake_case : Optional[int] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(a_ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __snake_case : Optional[int] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def SCREAMING_SNAKE_CASE (self , a_=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __snake_case : Tuple = id(self ) __snake_case : str = self._lock_file logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __snake_case : Dict = 0 logger().debug(f"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__(self ): '''simple docstring''' self.acquire() return self def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.release() return None def __del__(self ): '''simple docstring''' self.release(force=a_ ) return None def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : Any = os.path.basename(a_ ) if len(a_ ) > max_length and max_length > 0: __snake_case : List[Any] = os.path.dirname(a_ ) __snake_case : Any = str(hash(a_ ) ) __snake_case : List[Any] = filename[: max_length - len(a_ ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(a_ , a_ ) else: return path class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) __snake_case : List[str] = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __snake_case : Any = os.open(self._lock_file , a_ ) except OSError: pass else: try: msvcrt.locking(a_ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(a_ ) else: __snake_case : Dict = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Dict = None msvcrt.locking(a_ , msvcrt.LK_UNLCK , 1 ) os.close(a_ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : Optional[Any] = os.statvfs(os.path.dirname(a_ ) ).f_namemax super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC __snake_case : List[str] = os.open(self._lock_file , a_ ) try: fcntl.flock(a_ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(a_ ) else: __snake_case : Optional[int] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Tuple = None fcntl.flock(a_ , fcntl.LOCK_UN ) os.close(a_ ) return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __snake_case : Tuple = os.open(self._lock_file , a_ ) except OSError: pass else: __snake_case : List[Any] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' os.close(self._lock_file_fd ) __snake_case : int = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None SCREAMING_SNAKE_CASE : Dict = None if msvcrt: SCREAMING_SNAKE_CASE : List[Any] = WindowsFileLock elif fcntl: SCREAMING_SNAKE_CASE : List[str] = UnixFileLock else: SCREAMING_SNAKE_CASE : str = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
24
0
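Whichever class FileLock resolves to above (msvcrt, fcntl, or the soft fallback), the public API is the same. A minimal usage sketch; the lock and data file paths are placeholders:

lock = FileLock("app.lock", timeout=5)
with lock:  # blocks up to 5 seconds, then raises Timeout
    with open("shared_state.txt", "a") as f:
        f.write("exclusive write\n")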
import os
from typing import Dict, List, Tuple, TypeVar, Union

T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
355
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=24 , a_=2 , a_=6 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=None , a_=10_00 , ): '''simple docstring''' __snake_case : Any = parent __snake_case : int = batch_size __snake_case : Dict = seq_length __snake_case : List[str] = is_training __snake_case : List[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : Union[str, Any] = use_labels __snake_case : str = vocab_size __snake_case : int = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : int = num_attention_heads __snake_case : str = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : int = hidden_dropout_prob __snake_case : Union[str, Any] = attention_probs_dropout_prob __snake_case : List[Any] = max_position_embeddings __snake_case : Any = type_vocab_size __snake_case : Dict = type_sequence_label_size __snake_case : Optional[Any] = initializer_range __snake_case : Union[str, Any] = num_labels __snake_case : Any = scope __snake_case : Any = range_bbox def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : List[str] = bbox[i, j, 3] __snake_case : Any = bbox[i, j, 1] __snake_case : Tuple = t if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : List[str] = bbox[i, j, 2] __snake_case : Union[str, Any] = bbox[i, j, 0] __snake_case : Dict = t __snake_case : Optional[int] = None if self.use_input_mask: __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __snake_case : Dict = None if self.use_token_type_ids: __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : List[str] = None __snake_case : Union[str, Any] = None if self.use_labels: __snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : Any = model(a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ ) __snake_case : str = model(a_ , bbox=a_ , token_type_ids=a_ ) __snake_case : List[str] = model(a_ , bbox=a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[int] = self.num_labels __snake_case : List[str] = LiltForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Tuple = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[Any] = LiltForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Dict = config_and_inputs __snake_case : Any = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ =( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =False lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' return True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModelTester(self ) __snake_case : Optional[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Dict = type 
self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = LiltModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @require_torch @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a_ ) __snake_case : Dict = torch.tensor([[1, 2]] , device=a_ ) __snake_case : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a_ ) # forward pass with torch.no_grad(): __snake_case : Union[str, Any] = model(input_ids=a_ , bbox=a_ ) __snake_case : Union[str, Any] = torch.Size([1, 2, 7_68] ) __snake_case : str = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=a_ , ) self.assertTrue(outputs.last_hidden_state.shape , a_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a_ , atol=1E-3 ) )
24
0
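The LiLT model tester above legalises random bboxes with a per-element loop that swaps coordinates so that x0 <= x1 and y0 <= y1. A vectorised equivalent as a sketch; legalize_bboxes is an illustrative name, not part of the test file:

import torch

def legalize_bboxes(bbox: torch.Tensor) -> torch.Tensor:
    # boxes are [..., (x0, y0, x1, y1)]; enforce x0 <= x1 and y0 <= y1 elementwise
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)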
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : int = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } SCREAMING_SNAKE_CASE : Tuple = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def lowercase ( _snake_case : str ) ->str: """simple docstring""" __snake_case : List[str] = {} with open(UpperCAmelCase_ , '''r''' ) as file: for line_number, line in enumerate(UpperCAmelCase_ ): __snake_case : str = line.strip() if line: __snake_case : str = line.split() __snake_case : Optional[int] = line_number __snake_case : Dict = words[0] __snake_case : Optional[int] = value return result def lowercase ( _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Any , _snake_case : Any ) ->str: """simple docstring""" for attribute in key.split('''.''' ): __snake_case : str = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) __snake_case : str = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(UpperCAmelCase_ ): __snake_case : str = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : Optional[Any] = 'param' if weight_type is not None and weight_type != "param": __snake_case : Optional[Any] = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape elif weight_type is not None and weight_type == "param": __snake_case : Tuple = hf_pointer for attribute in hf_param_name.split('''.''' ): __snake_case : Any = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) __snake_case : str = shape_pointer.shape # let's reduce dimension __snake_case : List[Any] = value[0] else: __snake_case : Union[str, Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __snake_case : int = value elif weight_type == "weight_g": __snake_case : Optional[int] = value elif weight_type == "weight_v": __snake_case : Optional[int] = value elif weight_type == "bias": __snake_case : List[Any] = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __snake_case : int = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) __snake_case : Optional[Any] = value else: __snake_case : Union[str, Any] = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowercase ( _snake_case : List[Any] , _snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : List[str] ) ->Dict: """simple docstring""" __snake_case : str = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(UpperCAmelCase_ ): __snake_case : List[str] = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : Optional[int] = 'param' if weight_type is not None and weight_type != "param": __snake_case : Dict = '.'.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __snake_case : int = '.'.join([key, hf_param_name] ) else: __snake_case : Tuple = key __snake_case : Optional[int] = value if 'lm_head' in full_key else value[0] SCREAMING_SNAKE_CASE : str = { "W_a": "linear_1.weight", "W_b": "linear_2.weight", "b_a": "linear_1.bias", "b_b": "linear_2.bias", "ln_W": "norm.weight", "ln_b": "norm.bias", } def lowercase ( _snake_case : List[Any] , _snake_case : List[str] , _snake_case : str=None , _snake_case : List[Any]=None ) ->Optional[Any]: """simple docstring""" __snake_case : Optional[Any] = False for key, mapped_key in MAPPING.items(): __snake_case : Optional[Any] = 'wav2vec2.' 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __snake_case : Tuple = True if "*" in mapped_key: __snake_case : Optional[Any] = name.split(UpperCAmelCase_ )[0].split('''.''' )[-2] __snake_case : Union[str, Any] = mapped_key.replace('''*''' , UpperCAmelCase_ ) if "weight_g" in name: __snake_case : Optional[Any] = 'weight_g' elif "weight_v" in name: __snake_case : int = 'weight_v' elif "bias" in name: __snake_case : str = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __snake_case : Optional[int] = 'weight' else: __snake_case : Union[str, Any] = None if hf_dict is not None: rename_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return is_used return is_used def lowercase ( _snake_case : Dict , _snake_case : Optional[Any] , _snake_case : Dict ) ->Optional[Any]: """simple docstring""" __snake_case : Tuple = [] __snake_case : Union[str, Any] = fairseq_model.state_dict() __snake_case : Union[str, Any] = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __snake_case : int = False if "conv_layers" in name: load_conv_layer( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , hf_model.config.feat_extract_norm == '''group''' , ) __snake_case : List[str] = True else: __snake_case : int = load_wavaveca_layer(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) if not is_used: unused_weights.append(UpperCAmelCase_ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowercase ( _snake_case : Any , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Optional[Any] , _snake_case : List[str] ) ->Optional[Any]: """simple docstring""" __snake_case : Any = full_name.split('''conv_layers.''' )[-1] __snake_case : Tuple = name.split('''.''' ) __snake_case : Optional[Any] = int(items[0] ) __snake_case : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __snake_case : int = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __snake_case : int = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __snake_case : str = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" 
{feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __snake_case : List[Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(UpperCAmelCase_ ) @torch.no_grad() def lowercase ( _snake_case : Dict , _snake_case : Tuple , _snake_case : str=None , _snake_case : Optional[Any]=None , _snake_case : Dict=True , _snake_case : Union[str, Any]=False ) ->Tuple: """simple docstring""" if config_path is not None: __snake_case : Optional[int] = WavaVecaConfig.from_pretrained(UpperCAmelCase_ ) else: __snake_case : Dict = WavaVecaConfig() if is_seq_class: __snake_case : Any = read_txt_into_dict(UpperCAmelCase_ ) __snake_case : int = idalabel __snake_case : List[Any] = WavaVecaForSequenceClassification(UpperCAmelCase_ ) __snake_case : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , ) feature_extractor.save_pretrained(UpperCAmelCase_ ) elif is_finetuned: if dict_path: __snake_case : Any = Dictionary.load(UpperCAmelCase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : List[Any] = target_dict.pad_index __snake_case : int = target_dict.bos_index __snake_case : int = target_dict.eos_index __snake_case : str = len(target_dict.symbols ) __snake_case : List[Any] = os.path.join(UpperCAmelCase_ , '''vocab.json''' ) if not os.path.isdir(UpperCAmelCase_ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(UpperCAmelCase_ ) ) return os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) __snake_case : int = target_dict.indices # fairseq has the <pad> and <s> switched __snake_case : Optional[int] = 0 __snake_case : str = 1 with open(UpperCAmelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) __snake_case : Union[str, Any] = WavaVecaCTCTokenizer( UpperCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=UpperCAmelCase_ , ) __snake_case : Optional[Any] = True if config.feat_extract_norm == 'layer' else False __snake_case : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , ) __snake_case : Tuple = WavaVecaProcessor(feature_extractor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ ) processor.save_pretrained(UpperCAmelCase_ ) __snake_case : Optional[Any] = WavaVecaForCTC(UpperCAmelCase_ ) else: __snake_case : Union[str, Any] = WavaVecaForPreTraining(UpperCAmelCase_ ) if is_finetuned or is_seq_class: __snake_case : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __snake_case : int = argparse.Namespace(task='''audio_pretraining''' ) __snake_case : List[str] = fairseq.tasks.setup_task(UpperCAmelCase_ ) __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCAmelCase_ ) __snake_case : Optional[Any] = model[0].eval() recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ , not is_finetuned ) hf_wavavec.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser() 
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) parser.add_argument( """--is_seq_class""", action="""store_true""", help="""Whether the model to convert is a fine-tuned sequence classification model or not""", ) SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args() SCREAMING_SNAKE_CASE : List[str] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
356
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ): '''simple docstring''' __snake_case : List[Any] = parent __snake_case : List[Any] = batch_size __snake_case : str = seq_length __snake_case : Any = is_training __snake_case : Any = use_input_mask __snake_case : str = use_token_type_ids __snake_case : Dict = use_labels __snake_case : int = vocab_size __snake_case : Union[str, Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : str = num_attention_heads __snake_case : Optional[int] = intermediate_size __snake_case : str = hidden_act __snake_case : Union[str, Any] = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : str = max_position_embeddings __snake_case : Dict = type_vocab_size __snake_case : List[Any] = type_sequence_label_size __snake_case : Union[str, Any] = initializer_range __snake_case : str = num_labels __snake_case : Dict = num_choices __snake_case : Optional[int] = scope def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Dict = None if self.use_input_mask: __snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Tuple = None __snake_case : List[str] = None __snake_case : Dict = None if self.use_labels: __snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[str] = DistilBertModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model(a_ , a_ ) __snake_case : List[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple 
docstring''' __snake_case : Optional[Any] = DistilBertForMaskedLM(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Tuple = DistilBertForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : Optional[Any] = model( a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Any = self.num_labels __snake_case : Optional[int] = DistilBertForSequenceClassification(a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = self.num_labels __snake_case : Optional[int] = DistilBertForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Dict = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = self.num_choices __snake_case : Any = DistilBertForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : Optional[int] = model( a_ , attention_mask=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs __snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCamelCase__ =( { 'feature-extraction': DistilBertModel, 'fill-mask': DistilBertForMaskedLM, 'question-answering': DistilBertForQuestionAnswering, 'text-classification': DistilBertForSequenceClassification, 'token-classification': DistilBertForTokenClassification, 'zero-shot': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = DistilBertModelTester(self ) __snake_case : List[str] = ConfigTester(self , config_class=a_ , dim=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def 
SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Tuple = DistilBertModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return __snake_case : List[str] = True __snake_case : Tuple = model_class(config=a_ ) __snake_case : Any = self._prepare_for_class(a_ , a_ ) __snake_case : Dict = torch.jit.trace( a_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a_ , os.path.join(a_ , '''traced_model.pt''' ) ) __snake_case : int = torch.jit.load(os.path.join(a_ , '''traced_model.pt''' ) , map_location=a_ ) loaded(inputs_dict['''input_ids'''].to(a_ ) , inputs_dict['''attention_mask'''].to(a_ ) ) @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __snake_case : List[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __snake_case : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __snake_case : List[Any] = model(a_ , attention_mask=a_ )[0] __snake_case : Tuple = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , a_ ) __snake_case : Optional[int] = torch.tensor( [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available SCREAMING_SNAKE_CASE : str = { 'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : List[str] = [ 'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST', 'GPTNeoForCausalLM', 'GPTNeoForQuestionAnswering', 'GPTNeoForSequenceClassification', 'GPTNeoForTokenClassification', 'GPTNeoModel', 'GPTNeoPreTrainedModel', 'load_tf_weights_in_gpt_neo', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : int = [ 'FlaxGPTNeoForCausalLM', 'FlaxGPTNeoModel', 'FlaxGPTNeoPreTrainedModel', ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase ( _snake_case : str , _snake_case : str , _snake_case : str ) ->List[Any]: """simple docstring""" def get_masked_lm_array(_snake_case : str ): __snake_case : int = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : str = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Any = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_array(_snake_case : str ): __snake_case : List[str] = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Union[str, Any] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_layer_array(_snake_case : int , _snake_case : str ): __snake_case : str = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Optional[int] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[Any] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_attention_layer_array(_snake_case : int , _snake_case : str , _snake_case : str ): __snake_case : Any = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Dict = tf.train.load_variable(_snake_case , _snake_case ) __snake_case : int = array.reshape(_snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) print(f"""Loading model based on config from {config_path}...""" ) __snake_case : Optional[Any] = BertConfig.from_json_file(_snake_case ) __snake_case : Dict = BertForMaskedLM(_snake_case ) # Layers for layer_index in range(0 , config.num_hidden_layers ): __snake_case : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention __snake_case : BertSelfAttention = layer.attention.self __snake_case : int = get_encoder_attention_layer_array( _snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape ) __snake_case : List[Any] = get_encoder_attention_layer_array( _snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape ) __snake_case : Union[str, Any] = get_encoder_attention_layer_array( _snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape ) # Self-attention Output __snake_case : BertSelfOutput = layer.attention.output __snake_case : Dict = get_encoder_attention_layer_array( _snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape ) __snake_case : str = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/gamma''' ) __snake_case : Any = 
get_encoder_layer_array(_snake_case , '''_attention_layer_norm/beta''' ) # Intermediate __snake_case : BertIntermediate = layer.intermediate __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/kernel''' ) __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/bias''' ) # Output __snake_case : BertOutput = layer.output __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_dense/kernel''' ) __snake_case : Dict = get_encoder_layer_array(_snake_case , '''_output_dense/bias''' ) __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/gamma''' ) __snake_case : Union[str, Any] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/beta''' ) # Embeddings __snake_case : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' ) __snake_case : str = get_encoder_array('''_type_embedding_layer/embeddings''' ) __snake_case : int = get_encoder_array('''_embedding_norm_layer/gamma''' ) __snake_case : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' ) # LM Head __snake_case : Optional[Any] = model.cls.predictions.transform __snake_case : Dict = get_masked_lm_array('''dense/kernel''' ) __snake_case : Union[str, Any] = get_masked_lm_array('''dense/bias''' ) __snake_case : str = get_masked_lm_array('''layer_norm/gamma''' ) __snake_case : Tuple = get_masked_lm_array('''layer_norm/beta''' ) __snake_case : Tuple = get_masked_lm_array('''embedding_table''' ) # Pooling __snake_case : Optional[Any] = BertPooler(config=_snake_case ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/kernel''' ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/bias''' ) # Export final model model.save_pretrained(_snake_case ) # Integration test - should load without any errors ;) __snake_case : Dict = BertForMaskedLM.from_pretrained(_snake_case ) print(new_model.eval() ) print('''Model conversion was done sucessfully!''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() parser.add_argument( """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path.""" ) parser.add_argument( """--bert_config_file""", type=str, required=True, help="""The config json file corresponding to the BERT model. This specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", type=str, required=True, help="""Path to the output PyTorch model.""", ) SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' lowerCamelCase__ =42 class _UpperCAmelCase ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): '''simple docstring''' @register_to_config def __init__(self , a_ = 6_55_36 , a_ = None , a_ = 2 , a_ = 2 , a_ = 0 , a_ = "fourier" , a_ = True , a_ = False , a_ = 0.0 , a_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , a_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , a_ = "UNetMidBlock1D" , a_ = None , a_ = (32, 32, 64) , a_ = None , a_ = 8 , a_ = 1 , a_ = False , ): '''simple docstring''' super().__init__() __snake_case : Tuple = sample_size # time if time_embedding_type == "fourier": __snake_case : List[str] = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=A__ , log=A__ , flip_sin_to_cos=A__ ) __snake_case : Optional[Any] = 2 * block_out_channels[0] elif time_embedding_type == "positional": __snake_case : Union[str, Any] = Timesteps( block_out_channels[0] , flip_sin_to_cos=A__ , downscale_freq_shift=A__ ) __snake_case : int = block_out_channels[0] if use_timestep_embedding: __snake_case : List[Any] = block_out_channels[0] * 4 __snake_case : str = TimestepEmbedding( in_channels=A__ , time_embed_dim=A__ , act_fn=A__ , out_dim=block_out_channels[0] , ) __snake_case : List[Any] = nn.ModuleList([] ) __snake_case : Tuple = None __snake_case : Tuple = nn.ModuleList([] ) __snake_case : int = None # down __snake_case : Any = in_channels for i, down_block_type in enumerate(A__ ): __snake_case : Dict = output_channel __snake_case : Optional[int] = block_out_channels[i] if i == 0: input_channel += extra_in_channels __snake_case : List[Any] = i == len(A__ ) - 1 __snake_case : Optional[int] = get_down_block( A__ , num_layers=A__ , in_channels=A__ , out_channels=A__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(A__ ) # mid __snake_case : Dict = get_mid_block( A__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=A__ , add_downsample=A__ , ) # up __snake_case : List[Any] = list(reversed(A__ ) ) __snake_case : int = reversed_block_out_channels[0] if out_block_type is None: __snake_case : str = out_channels else: __snake_case : Optional[int] = block_out_channels[0] for i, up_block_type in enumerate(A__ ): __snake_case : str = output_channel __snake_case : Dict = ( reversed_block_out_channels[i + 1] if i < len(A__ ) - 1 else final_upsample_channels ) __snake_case : Any = i == len(A__ ) - 1 __snake_case : Union[str, Any] = get_up_block( A__ , num_layers=A__ , in_channels=A__ , out_channels=A__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(A__ ) __snake_case : List[str] = output_channel # out __snake_case : Tuple = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) __snake_case : Union[str, Any] = get_out_block( out_block_type=A__ , num_groups_out=A__ , embed_dim=block_out_channels[0] , 
out_channels=A__ , act_fn=A__ , fc_dim=block_out_channels[-1] // 4 , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ = True , ): '''simple docstring''' __snake_case : Any = timestep if not torch.is_tensor(A__ ): __snake_case : List[Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(A__ ) and len(timesteps.shape ) == 0: __snake_case : str = timesteps[None].to(sample.device ) __snake_case : int = self.time_proj(A__ ) if self.config.use_timestep_embedding: __snake_case : List[Any] = self.time_mlp(A__ ) else: __snake_case : List[str] = timestep_embed[..., None] __snake_case : int = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) __snake_case : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down __snake_case : Optional[int] = () for downsample_block in self.down_blocks: __snake_case , __snake_case : Tuple = downsample_block(hidden_states=A__ , temb=A__ ) down_block_res_samples += res_samples # 3. mid if self.mid_block: __snake_case : Any = self.mid_block(A__ , A__ ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): __snake_case : List[Any] = down_block_res_samples[-1:] __snake_case : Tuple = down_block_res_samples[:-1] __snake_case : List[Any] = upsample_block(A__ , res_hidden_states_tuple=A__ , temb=A__ ) # 5. post-process if self.out_block: __snake_case : Optional[int] = self.out_block(A__ , A__ ) if not return_dict: return (sample,) return UNetaDOutput(sample=A__ )
"""simple docstring""" import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_ , a_ = None , a_ = None , a_ = False , **a_ , ): '''simple docstring''' super().__init__(features=a_ , cache_dir=a_ , keep_in_memory=a_ , **a_ ) __snake_case : Union[str, Any] = Sql( cache_dir=a_ , features=a_ , sql=a_ , con=a_ , **a_ , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = None __snake_case : Dict = None __snake_case : Dict = None __snake_case : List[str] = None self.builder.download_and_prepare( download_config=a_ , download_mode=a_ , verification_mode=a_ , base_path=a_ , ) # Build dataset for splits __snake_case : Any = self.builder.as_dataset( split='''train''' , verification_mode=a_ , in_memory=self.keep_in_memory ) return dataset class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_ , a_ , a_ = None , a_ = None , **a_ , ): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" ) __snake_case : List[str] = dataset __snake_case : Tuple = name __snake_case : Optional[int] = con __snake_case : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __snake_case : Dict = num_proc __snake_case : Dict = to_sql_kwargs def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.to_sql_kwargs.pop('''sql''' , a_ ) __snake_case : Union[str, Any] = self.to_sql_kwargs.pop('''con''' , a_ ) __snake_case : Any = self.to_sql_kwargs.pop('''index''' , a_ ) __snake_case : Optional[Any] = self._write(index=a_ , **self.to_sql_kwargs ) return written def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case , __snake_case , __snake_case : Optional[Any] = args __snake_case : List[Any] = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs __snake_case : Dict = query_table( table=self.dataset.data , key=slice(a_ , offset + self.batch_size ) , indices=self.dataset._indices , ) __snake_case : Tuple = batch.to_pandas() __snake_case : str = df.to_sql(self.name , self.con , index=a_ , **a_ ) return num_rows or len(a_ ) def SCREAMING_SNAKE_CASE (self , a_ , **a_ ): '''simple docstring''' __snake_case : int = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: __snake_case , __snake_case : Union[str, Any] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a_ , a_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += num_rows return written
"""simple docstring""" from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def lowercase ( _snake_case : Optional[int] ) ->Any: """simple docstring""" return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) SCREAMING_SNAKE_CASE : Tuple = """\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n""" class _UpperCAmelCase ( __a ): '''simple docstring''' @staticmethod def SCREAMING_SNAKE_CASE (a_ ): '''simple docstring''' __snake_case : Optional[int] = parser.add_parser( '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , ) train_parser.add_argument('''--model_type''' , type=a__ , required=a__ , help='''Model\'s type.''' ) train_parser.add_argument( '''--tf_checkpoint''' , type=a__ , required=a__ , help='''TensorFlow checkpoint path or folder.''' ) train_parser.add_argument( '''--pytorch_dump_output''' , type=a__ , required=a__ , help='''Path to the PyTorch saved model output.''' ) train_parser.add_argument('''--config''' , type=a__ , default='''''' , help='''Configuration file path or folder.''' ) train_parser.add_argument( '''--finetuning_task_name''' , type=a__ , default=a__ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , ) train_parser.set_defaults(func=a__ ) def __init__(self , a_ , a_ , a_ , a_ , a_ , *a_ , ): '''simple docstring''' __snake_case : Any = logging.get_logger('''transformers-cli/converting''' ) self._logger.info(f"""Loading model {model_type}""" ) __snake_case : Dict = model_type __snake_case : Optional[Any] = tf_checkpoint __snake_case : Any = pytorch_dump_output __snake_case : Any = config __snake_case : Tuple = finetuning_task_name def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a__ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a__ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a__ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(a__ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from 
..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a__ ) if "ckpt" in self._tf_checkpoint.lower(): __snake_case : List[Any] = self._tf_checkpoint __snake_case : List[str] = '''''' else: __snake_case : List[str] = self._tf_checkpoint __snake_case : Any = '''''' convert_transfo_xl_checkpoint_to_pytorch( a__ , self._config , self._pytorch_dump_output , a__ ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a__ ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(a__ ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( '''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[int] = { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""", } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='lxmert' lowerCamelCase__ ={} def __init__(self , a_=3_05_22 , a_=7_68 , a_=12 , a_=95_00 , a_=16_00 , a_=4_00 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=9 , a_=5 , a_=5 , a_=20_48 , a_=4 , a_=6.67 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = vocab_size __snake_case : List[str] = hidden_size __snake_case : List[Any] = num_attention_heads __snake_case : int = hidden_act __snake_case : int = intermediate_size __snake_case : Any = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : Tuple = max_position_embeddings __snake_case : List[str] = type_vocab_size __snake_case : str = initializer_range __snake_case : Tuple = layer_norm_eps __snake_case : List[Any] = num_qa_labels __snake_case : int = num_object_labels __snake_case : Optional[Any] = num_attr_labels __snake_case : Union[str, Any] = l_layers __snake_case : Optional[int] = x_layers __snake_case : Optional[int] = r_layers __snake_case : Tuple = visual_feat_dim __snake_case : Optional[int] = visual_pos_dim __snake_case : Dict = visual_loss_normalizer __snake_case : str = task_matched __snake_case : Optional[Any] = task_mask_lm __snake_case : List[str] = task_obj_predict __snake_case : Optional[Any] = task_qa __snake_case : Any = visual_obj_loss __snake_case : int = visual_attr_loss __snake_case : List[Any] = visual_feat_loss __snake_case : Optional[Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers} super().__init__(**a_ )
"""simple docstring""" def lowercase ( _snake_case : list , _snake_case : list , _snake_case : int ) ->int: """simple docstring""" if len(_snake_case ) != len(_snake_case ): raise ValueError('''The length of profit and weight must be same.''' ) if max_weight <= 0: raise ValueError('''max_weight must greater than zero.''' ) if any(p < 0 for p in profit ): raise ValueError('''Profit can not be negative.''' ) if any(w < 0 for w in weight ): raise ValueError('''Weight can not be negative.''' ) # List created to store profit gained for the 1kg in case of each weight # respectively. Calculate and append profit/weight for each element. __snake_case : Tuple = [p / w for p, w in zip(_snake_case , _snake_case )] # Creating a copy of the list and sorting profit/weight in ascending order __snake_case : str = sorted(_snake_case ) # declaring useful variables __snake_case : int = len(_snake_case ) __snake_case : Optional[Any] = 0 __snake_case : List[Any] = 0 __snake_case : Any = 0 # loop till the total weight do not reach max limit e.g. 15 kg and till i<length while limit <= max_weight and i < length: # flag value for encountered greatest element in sorted_profit_by_weight __snake_case : str = sorted_profit_by_weight[length - i - 1] __snake_case : Union[str, Any] = profit_by_weight.index(_snake_case ) __snake_case : str = -1 # check if the weight encountered is less than the total weight # encountered before. if max_weight - limit >= weight[index]: limit += weight[index] # Adding profit gained for the given weight 1 === # weight[index]/weight[index] gain += 1 * profit[index] else: # Since the weight encountered is greater than limit, therefore take the # required number of remaining kgs and calculate profit for it. # weight remaining / weight[index] gain += (max_weight - limit) / weight[index] * profit[index] break i += 1 return gain if __name__ == "__main__": print( """Input profits, weights, and then max_weight (all positive ints) separated by """ """spaces.""" ) SCREAMING_SNAKE_CASE : Tuple = [int(x) for x in input("""Input profits separated by spaces: """).split()] SCREAMING_SNAKE_CASE : Tuple = [int(x) for x in input("""Input weights separated by spaces: """).split()] SCREAMING_SNAKE_CASE : Dict = int(input("""Max weight allowed: """)) # Function Call calc_profit(profit, weight, max_weight)
"""simple docstring""" def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" __snake_case : Tuple = len(_snake_case ) __snake_case : str = sum(_snake_case ) __snake_case : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __snake_case : Optional[Any] = True for i in range(1 , s + 1 ): __snake_case : int = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __snake_case : Union[str, Any] = dp[i][j - 1] if arr[i - 1] <= j: __snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __snake_case : List[str] = s - 2 * j break return diff
"""simple docstring""" import re import subprocess import sys SCREAMING_SNAKE_CASE : List[Any] = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""") SCREAMING_SNAKE_CASE : List[str] = ( subprocess.check_output(F'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode("""utf-8""").split() ) SCREAMING_SNAKE_CASE : Union[str, Any] = """|""".join(sys.argv[1:]) SCREAMING_SNAKE_CASE : List[str] = re.compile(rF'^({joined_dirs}).*?\.py$') SCREAMING_SNAKE_CASE : Tuple = [x for x in modified_files if regex.match(x)] print(""" """.join(relevant_modified_files), end="""""")
"""simple docstring""" from collections.abc import Callable def lowercase ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) ->float: """simple docstring""" __snake_case : float = a __snake_case : float = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('''could not find root in given interval.''' ) else: __snake_case : float = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: __snake_case : List[str] = mid else: __snake_case : str = mid __snake_case : str = start + (end - start) / 2.0 return mid def lowercase ( _snake_case : float ) ->float: """simple docstring""" return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1000)) import doctest doctest.testmod()
"""simple docstring""" from collections.abc import Generator from math import sin def lowercase ( _snake_case : Optional[int] ) ->bytes: """simple docstring""" if len(UpperCAmelCase__ ) != 32: raise ValueError('''Input must be of length 32''' ) __snake_case : Optional[Any] = b'''''' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def lowercase ( _snake_case : Dict ) ->bytes: """simple docstring""" if i < 0: raise ValueError('''Input must be non-negative''' ) __snake_case : Dict = format(UpperCAmelCase__ , '''08x''' )[-8:] __snake_case : List[str] = b'''''' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' ) return little_endian_hex def lowercase ( _snake_case : int ) ->bytes: """simple docstring""" __snake_case : Union[str, Any] = b'''''' for char in message: bit_string += format(UpperCAmelCase__ , '''08b''' ).encode('''utf-8''' ) __snake_case : Any = format(len(UpperCAmelCase__ ) , '''064b''' ).encode('''utf-8''' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(UpperCAmelCase__ ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def lowercase ( _snake_case : Optional[int] ) ->Generator[list[int], None, None]: """simple docstring""" if len(UpperCAmelCase__ ) % 512 != 0: raise ValueError('''Input must have length that\'s a multiple of 512''' ) for pos in range(0 , len(UpperCAmelCase__ ) , 512 ): __snake_case : Optional[int] = bit_string[pos : pos + 512] __snake_case : List[str] = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def lowercase ( _snake_case : Union[str, Any] ) ->int: """simple docstring""" if i < 0: raise ValueError('''Input must be non-negative''' ) __snake_case : Optional[Any] = format(UpperCAmelCase__ , '''032b''' ) __snake_case : Union[str, Any] = '''''' for c in i_str: new_str += "1" if c == "0" else "0" return int(UpperCAmelCase__ , 2 ) def lowercase ( _snake_case : List[str] , _snake_case : Optional[Any] ) ->int: """simple docstring""" return (a + b) % 2**32 def lowercase ( _snake_case : List[Any] , _snake_case : List[str] ) ->int: """simple docstring""" if i < 0: raise ValueError('''Input must be non-negative''' ) if shift < 0: raise ValueError('''Shift must be non-negative''' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def lowercase ( _snake_case : List[Any] ) ->bytes: """simple docstring""" __snake_case : List[str] = preprocess(UpperCAmelCase__ ) __snake_case : Optional[int] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states __snake_case : int = 0x6745_2301 __snake_case : Dict = 0xefcd_ab89 __snake_case : Optional[Any] = 0x98ba_dcfe __snake_case : Tuple = 0x1032_5476 __snake_case : Any = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(UpperCAmelCase__ ): __snake_case : List[str] = aa __snake_case : Tuple = ba __snake_case : List[str] = ca __snake_case : str = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __snake_case : Dict = d ^ (b & (c ^ d)) __snake_case : Dict = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate 
definition for f __snake_case : List[Any] = c ^ (d & (b ^ c)) __snake_case : Tuple = (5 * i + 1) % 16 elif i <= 47: __snake_case : int = b ^ c ^ d __snake_case : Dict = (3 * i + 5) % 16 else: __snake_case : List[str] = c ^ (b | not_aa(UpperCAmelCase__ )) __snake_case : int = (7 * i) % 16 __snake_case : Dict = (f + a + added_consts[i] + block_words[g]) % 2**32 __snake_case : Optional[Any] = d __snake_case : Any = c __snake_case : List[Any] = b __snake_case : List[Any] = sum_aa(UpperCAmelCase__ , left_rotate_aa(UpperCAmelCase__ , shift_amounts[i] ) ) # Add hashed chunk to running total __snake_case : Tuple = sum_aa(UpperCAmelCase__ , UpperCAmelCase__ ) __snake_case : Union[str, Any] = sum_aa(UpperCAmelCase__ , UpperCAmelCase__ ) __snake_case : int = sum_aa(UpperCAmelCase__ , UpperCAmelCase__ ) __snake_case : Tuple = sum_aa(UpperCAmelCase__ , UpperCAmelCase__ ) __snake_case : Tuple = reformat_hex(UpperCAmelCase__ ) + reformat_hex(UpperCAmelCase__ ) + reformat_hex(UpperCAmelCase__ ) + reformat_hex(UpperCAmelCase__ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[str] = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : str = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' torch.manual_seed(0 ) __snake_case : str = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.dummy_uncond_unet __snake_case : Any = ScoreSdeVeScheduler() __snake_case : int = ScoreSdeVePipeline(unet=_A , scheduler=_A ) sde_ve.to(_A ) sde_ve.set_progress_bar_config(disable=_A ) __snake_case : List[str] = torch.manual_seed(0 ) __snake_case : str = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=_A ).images __snake_case : Any = torch.manual_seed(0 ) __snake_case : Tuple = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=_A , return_dict=_A )[ 0 ] __snake_case : Optional[int] = image[0, -3:, -3:, -1] __snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __snake_case : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = '''google/ncsnpp-church-256''' __snake_case : Union[str, Any] = UNetaDModel.from_pretrained(_A ) __snake_case : List[Any] = ScoreSdeVeScheduler.from_pretrained(_A ) __snake_case : Any = ScoreSdeVePipeline(unet=_A , scheduler=_A ) sde_ve.to(_A ) sde_ve.set_progress_bar_config(disable=_A ) __snake_case : Union[str, Any] = torch.manual_seed(0 ) __snake_case : Optional[int] = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=_A ).images __snake_case : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) __snake_case : Optional[int] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =['image_processor', 'tokenizer'] lowerCamelCase__ ='CLIPImageProcessor' lowerCamelCase__ =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__(self , a_=None , a_=None , **a_ ): '''simple docstring''' __snake_case : Any = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a_ , ) __snake_case : Union[str, Any] = kwargs.pop('''feature_extractor''' ) __snake_case : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a_ , a_ ) def __call__(self , a_=None , a_=None , a_=None , **a_ ): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __snake_case : Dict = self.tokenizer(a_ , return_tensors=a_ , **a_ ) if images is not None: __snake_case : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None and images is not None: __snake_case : List[str] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.batch_decode(*a_ , **a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.decode(*a_ , **a_ ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.tokenizer.model_input_names __snake_case : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
"""simple docstring""" import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class _UpperCAmelCase ( a__, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =FlaxAutoencoderKL @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = 4 __snake_case : Dict = 3 __snake_case : Optional[int] = (32, 32) __snake_case : Optional[int] = jax.random.PRNGKey(0 ) __snake_case : Union[str, Any] = jax.random.uniform(SCREAMING_SNAKE_CASE_ , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = { 'block_out_channels': [32, 64], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 4, } __snake_case : Dict = self.dummy_input return init_dict, inputs_dict
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE : List[Any] = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""", """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""", }, } SCREAMING_SNAKE_CASE : Tuple = { """facebook/mbart-large-en-ro""": 1024, """facebook/mbart-large-cc25""": 1024, } # fmt: off SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =VOCAB_FILES_NAMES lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ =['input_ids', 'attention_mask'] lowerCamelCase__ =MBartTokenizer lowerCamelCase__ =[] lowerCamelCase__ =[] def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token super().__init__( vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , ) __snake_case : Tuple = vocab_file __snake_case : Optional[Any] = False if not self.vocab_file else True __snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) __snake_case : Optional[int] = { lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX''' __snake_case : Any = self.convert_tokens_to_ids(self._src_lang ) __snake_case : Dict = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' __snake_case : Tuple = [self.sep_token_id] __snake_case : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __snake_case : Optional[int] = src_lang __snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ ) __snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ ) __snake_case : int = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ): '''simple docstring''' __snake_case : int = src_lang __snake_case : List[Any] = tgt_lang return super().prepare_seqaseq_batch(a_ , a_ , **a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : List[Any] = [] __snake_case : Any = [self.eos_token_id, self.cur_lang_code] __snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : int = self.convert_tokens_to_ids(a_ ) __snake_case : Optional[Any] = [] __snake_case : Dict = [self.eos_token_id, self.cur_lang_code] __snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + 
suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(a_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return __snake_case : Optional[Any] = os.path.join( a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file , a_ ) return (out_vocab_file,)
"""simple docstring""" from __future__ import annotations from math import gcd def lowercase ( _snake_case : int , _snake_case : int = 2 , _snake_case : int = 1 , _snake_case : int = 3 , ) ->int | None: """simple docstring""" if num < 2: raise ValueError('''The input value cannot be less than 2''' ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(_snake_case : int , _snake_case : int , _snake_case : int ) -> int: return (pow(lowerCAmelCase__ , 2 ) + step) % modulus for _ in range(lowerCAmelCase__ ): # These track the position within the cycle detection logic. __snake_case : int = seed __snake_case : Dict = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. __snake_case : List[Any] = rand_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) __snake_case : int = rand_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) __snake_case : int = rand_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. __snake_case : Tuple = gcd(hare - tortoise , lowerCAmelCase__ ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. __snake_case : List[Any] = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser() parser.add_argument( """num""", type=int, help="""The value to find a divisor of""", ) parser.add_argument( """--attempts""", type=int, default=3, help="""The number of attempts before giving up""", ) SCREAMING_SNAKE_CASE : Tuple = parser.parse_args() SCREAMING_SNAKE_CASE : Optional[Any] = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F'{args.num} is probably prime') else: SCREAMING_SNAKE_CASE : Optional[Any] = args.num // divisor print(F'{args.num} = {divisor} * {quotient}')
"""simple docstring""" import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__) @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None @dataclass(frozen=__snake_case ) class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None lowerCamelCase__ =None if is_torch_available(): import torch from torch.utils.data import Dataset class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ): '''simple docstring''' __snake_case : Any = hans_processors[task]() __snake_case : int = os.path.join( a_ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , ) __snake_case : Tuple = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Dict = label_list[2], label_list[1] __snake_case : Any = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __snake_case : int = cached_features_file + '''.lock''' with FileLock(a_ ): if os.path.exists(a_ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __snake_case : Union[str, Any] = torch.load(a_ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __snake_case : Dict = ( processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) ) logger.info('''Training examples: %s''' , len(a_ ) ) __snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) logger.info('''Saving features into cached file %s''' , a_ ) torch.save(self.features , a_ ) def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list if is_tf_available(): import tensorflow as tf class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ , a_ = 1_28 , a_=False , a_ = False , ): '''simple docstring''' __snake_case : List[Any] = hans_processors[task]() __snake_case : str = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case : Tuple = label_list[2], label_list[1] __snake_case : Dict = label_list __snake_case : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ ) __snake_case : Dict = hans_convert_examples_to_features(a_ , a_ , a_ , a_ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_00_00 == 0: 
logger.info('''Writing example %d of %d''' % (ex_index, len(a_ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __snake_case : Union[str, Any] = tf.data.Dataset.from_generator( a_ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.dataset def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' return self.features[i] def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.label_list class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return ["contradiction", "entailment", "neutral"] def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = [] for i, line in enumerate(a_ ): if i == 0: continue __snake_case : Tuple = '''%s-%s''' % (set_type, line[0]) __snake_case : Dict = line[5] __snake_case : int = line[6] __snake_case : Dict = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __snake_case : List[Any] = line[0] examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) ) return examples def lowercase ( _snake_case : List[InputExample] , _snake_case : List[str] , _snake_case : int , _snake_case : PreTrainedTokenizer , ) ->List[str]: """simple docstring""" __snake_case : Optional[int] = {label: i for i, label in enumerate(_snake_case )} __snake_case : Tuple = [] for ex_index, example in tqdm.tqdm(enumerate(_snake_case ) , desc='''convert examples to features''' ): if ex_index % 10_000 == 0: logger.info('''Writing example %d''' % (ex_index) ) __snake_case : List[Any] = tokenizer( example.text_a , example.text_b , add_special_tokens=_snake_case , max_length=_snake_case , padding='''max_length''' , truncation=_snake_case , return_overflowing_tokens=_snake_case , ) __snake_case : List[Any] = label_map[example.label] if example.label in label_map else 0 __snake_case : Union[str, Any] = int(example.pairID ) features.append(InputFeatures(**_snake_case , label=_snake_case , pairID=_snake_case ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features SCREAMING_SNAKE_CASE : Dict = { """hans""": 3, } SCREAMING_SNAKE_CASE : str = { """hans""": HansProcessor, }
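To make the feature-conversion step concrete, here is a self-contained sketch of what hans_convert_examples_to_features does for a single premise/hypothesis pair (the model name and sentences are illustrative, not taken from the script):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
encoding = tokenizer(
    "The doctor near the actor danced.",  # text_a (premise)
    "The doctor danced.",                 # text_b (hypothesis)
    add_special_tokens=True,
    max_length=128,
    padding="max_length",
    truncation=True,
)
print(len(encoding["input_ids"]))  # 128 -- every feature is padded out to max_length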
24
0
"""simple docstring""" import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef SCREAMING_SNAKE_CASE : Tuple = ( """This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """ """library. You can have a look at this example script for pointers: """ """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" ) def lowercase ( _snake_case : Any , _snake_case : Dict ) ->Optional[Any]: """simple docstring""" warnings.warn(_snake_case , _snake_case ) requires_backends(_snake_case , '''sklearn''' ) return (preds == labels).mean() def lowercase ( _snake_case : Optional[int] , _snake_case : str ) ->List[Any]: """simple docstring""" warnings.warn(_snake_case , _snake_case ) requires_backends(_snake_case , '''sklearn''' ) __snake_case : List[Any] = simple_accuracy(_snake_case , _snake_case ) __snake_case : List[str] = fa_score(y_true=_snake_case , y_pred=_snake_case ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def lowercase ( _snake_case : str , _snake_case : str ) ->Tuple: """simple docstring""" warnings.warn(_snake_case , _snake_case ) requires_backends(_snake_case , '''sklearn''' ) __snake_case : Union[str, Any] = pearsonr(_snake_case , _snake_case )[0] __snake_case : str = spearmanr(_snake_case , _snake_case )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def lowercase ( _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : str ) ->str: """simple docstring""" warnings.warn(_snake_case , _snake_case ) requires_backends(_snake_case , '''sklearn''' ) assert len(_snake_case ) == len(_snake_case ), f"""Predictions and labels have mismatched lengths {len(_snake_case )} and {len(_snake_case )}""" if task_name == "cola": return {"mcc": matthews_corrcoef(_snake_case , _snake_case )} elif task_name == "sst-2": return {"acc": simple_accuracy(_snake_case , _snake_case )} elif task_name == "mrpc": return acc_and_fa(_snake_case , _snake_case ) elif task_name == "sts-b": return pearson_and_spearman(_snake_case , _snake_case ) elif task_name == "qqp": return acc_and_fa(_snake_case , _snake_case ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(_snake_case , _snake_case )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(_snake_case , _snake_case )} elif task_name == "qnli": return {"acc": simple_accuracy(_snake_case , _snake_case )} elif task_name == "rte": return {"acc": simple_accuracy(_snake_case , _snake_case )} elif task_name == "wnli": return {"acc": simple_accuracy(_snake_case , _snake_case )} elif task_name == "hans": return {"acc": simple_accuracy(_snake_case , _snake_case )} else: raise KeyError(_snake_case ) def lowercase ( _snake_case : str , _snake_case : Optional[Any] , _snake_case : Optional[Any] ) ->List[str]: """simple docstring""" warnings.warn(_snake_case , _snake_case ) requires_backends(_snake_case , '''sklearn''' ) if len(_snake_case ) != len(_snake_case ): raise ValueError(f"""Predictions and labels have mismatched lengths {len(_snake_case )} and {len(_snake_case )}""" ) if task_name == "xnli": return {"acc": simple_accuracy(_snake_case , _snake_case )} else: raise KeyError(_snake_case )
366
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[str] = { """tanreinama/GPTSAN-2.8B-spout_is_uniform""": ( """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json""" ), } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='gptsan-japanese' lowerCamelCase__ =[ 'past_key_values', ] lowerCamelCase__ ={ 'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__(self , a_=3_60_00 , a_=12_80 , a_=10_24 , a_=81_92 , a_=40_96 , a_=1_28 , a_=10 , a_=0 , a_=16 , a_=16 , a_=1_28 , a_=0.0 , a_=1E-5 , a_=False , a_=0.0 , a_="float32" , a_=False , a_=False , a_=False , a_=0.002 , a_=False , a_=True , a_=3_59_98 , a_=3_59_95 , a_=3_59_99 , **a_ , ): '''simple docstring''' __snake_case : Any = vocab_size __snake_case : str = max_position_embeddings __snake_case : Any = d_model __snake_case : List[str] = d_ff __snake_case : Dict = d_ext __snake_case : Optional[Any] = d_spout __snake_case : int = num_switch_layers __snake_case : List[Any] = num_ext_layers __snake_case : Any = num_switch_layers + num_ext_layers __snake_case : Optional[int] = num_heads __snake_case : Tuple = num_experts __snake_case : List[Any] = expert_capacity __snake_case : Dict = dropout_rate __snake_case : Optional[Any] = layer_norm_epsilon __snake_case : Dict = router_bias __snake_case : str = router_jitter_noise __snake_case : List[str] = router_dtype __snake_case : Union[str, Any] = router_ignore_padding_tokens __snake_case : List[str] = output_hidden_states __snake_case : Optional[Any] = output_attentions __snake_case : Any = initializer_factor __snake_case : int = output_router_logits __snake_case : Union[str, Any] = use_cache super().__init__( separator_token_id=a_ , pad_token_id=a_ , eos_token_id=a_ , **a_ , )
24
0
"""simple docstring""" from manim import * class _UpperCAmelCase ( UpperCamelCase__ ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = Rectangle(height=0.5 , width=0.5 ) __snake_case : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) __snake_case : List[str] = Rectangle(height=0.25 , width=0.25 ) __snake_case : int = [mem.copy() for i in range(6 )] __snake_case : Union[str, Any] = [mem.copy() for i in range(6 )] __snake_case : Optional[int] = VGroup(*__a ).arrange(__a , buff=0 ) __snake_case : Optional[Any] = VGroup(*__a ).arrange(__a , buff=0 ) __snake_case : Union[str, Any] = VGroup(__a , __a ).arrange(__a , buff=0 ) __snake_case : List[Any] = Text('''CPU''' , font_size=24 ) __snake_case : Optional[Any] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__a ) __snake_case : List[Any] = [mem.copy() for i in range(4 )] __snake_case : List[Any] = VGroup(*__a ).arrange(__a , buff=0 ) __snake_case : str = Text('''GPU''' , font_size=24 ) __snake_case : Optional[int] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a ) gpu.move_to([-1, -1, 0] ) self.add(__a ) __snake_case : List[str] = [mem.copy() for i in range(6 )] __snake_case : int = VGroup(*__a ).arrange(__a , buff=0 ) __snake_case : Tuple = Text('''Model''' , font_size=24 ) __snake_case : Dict = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a ) model.move_to([3, -1.0, 0] ) self.add(__a ) __snake_case : List[Any] = [] __snake_case : str = [] for i, rect in enumerate(__a ): __snake_case : str = fill.copy().set_fill(__a , opacity=0.8 ) target.move_to(__a ) model_arr.append(__a ) __snake_case : int = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(__a ) self.add(*__a , *__a ) __snake_case : Optional[int] = [meta_mem.copy() for i in range(6 )] __snake_case : Tuple = [meta_mem.copy() for i in range(6 )] __snake_case : int = VGroup(*__a ).arrange(__a , buff=0 ) __snake_case : List[Any] = VGroup(*__a ).arrange(__a , buff=0 ) __snake_case : str = VGroup(__a , __a ).arrange(__a , buff=0 ) __snake_case : Union[str, Any] = Text('''Disk''' , font_size=24 ) __snake_case : Union[str, Any] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a ) disk.move_to([-4, -1.25, 0] ) self.add(__a , __a ) __snake_case : Tuple = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __snake_case : Optional[int] = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__a , __a ) __snake_case : Optional[int] = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(__a ) __snake_case : Tuple = MarkupText( f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__a ) ) __snake_case : Union[str, Any] = Square(0.3 ) input.set_fill(__a , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , __a , buff=0.5 ) self.play(Write(__a ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=__a , buff=0.02 ) self.play(MoveToTarget(__a ) ) self.play(FadeOut(__a ) ) __snake_case : Any = Arrow(start=__a , end=__a , color=__a , buff=0.5 ) a.next_to(model_arr[0].get_left() , __a , buff=0.2 ) 
model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) __snake_case : Union[str, Any] = MarkupText( f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__a , run_time=3 ) ) __snake_case : Optional[Any] = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02} self.play( Write(__a ) , Circumscribe(model_arr[0] , color=__a , **__a ) , Circumscribe(model_cpu_arr[0] , color=__a , **__a ) , Circumscribe(gpu_rect[0] , color=__a , **__a ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) __snake_case : Tuple = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , __a , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) __snake_case : int = AnimationGroup( FadeOut(__a , run_time=0.5 ) , MoveToTarget(__a , run_time=0.5 ) , FadeIn(__a , run_time=0.5 ) , lag_ratio=0.2 ) self.play(__a ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: __snake_case : List[str] = 0.7 self.play( Circumscribe(model_arr[i] , **__a ) , Circumscribe(cpu_left_col_base[i] , **__a ) , Circumscribe(cpu_left_col_base[i + 1] , color=__a , **__a ) , Circumscribe(gpu_rect[0] , color=__a , **__a ) , Circumscribe(model_arr[i + 1] , color=__a , **__a ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=__a , **__a ) , Circumscribe(cpu_left_col_base[-1] , color=__a , **__a ) , Circumscribe(gpu_rect[0] , color=__a , **__a ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) __snake_case : List[Any] = a_c __snake_case : Union[str, Any] = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(__a ) , FadeOut(__a , run_time=0.5 ) , ) __snake_case : Any = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(__a , run_time=3 ) , MoveToTarget(__a ) ) self.wait()
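The scene above animates accelerate-style layer offloading: the weights live on CPU (or disk), and a forward hook moves each layer onto the GPU just in time, then evicts it. A minimal sketch of the mechanism being visualized, assuming the accelerate API and a checkpoint on disk (model name and paths are placeholders):

from accelerate import init_empty_weights, load_checkpoint_and_dispatch
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("gpt2")  # illustrative architecture
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)  # no weights allocated yet

# Hooks stream each layer's weights to the GPU at forward time and move them
# back afterwards -- the back-and-forth movement the animation depicts.
model = load_checkpoint_and_dispatch(
    model, "path/to/checkpoint", device_map="auto", offload_folder="offload"
)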
367
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """adapter_layer""": """encoder.layers.*.adapter_layer""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", """pooling_layer.linear""": """projector""", """pooling_layer.projection""": """classifier""", } SCREAMING_SNAKE_CASE : int = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """projector""", """classifier""", ] def lowercase ( _snake_case : Optional[int] ) ->int: """simple docstring""" __snake_case : int = {} with open(_snake_case , '''r''' ) as file: for line_number, line in enumerate(_snake_case ): __snake_case : Union[str, Any] = line.strip() if line: __snake_case : str = line.split() __snake_case : Union[str, Any] = line_number __snake_case : Dict = words[0] __snake_case : str = value return result def lowercase ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Any , _snake_case : List[str] ) ->List[str]: """simple docstring""" for attribute in key.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : str = '''param''' if weight_type is not None and weight_type != "param": __snake_case : Union[str, Any] = getattr(_snake_case , _snake_case ).shape elif weight_type is not None and weight_type == "param": __snake_case : Optional[Any] = hf_pointer for attribute in hf_param_name.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : List[str] = shape_pointer.shape # let's reduce dimension __snake_case : int = value[0] else: __snake_case : int = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __snake_case : List[Any] = value elif weight_type == "weight_g": __snake_case : Tuple = value elif weight_type == "weight_v": __snake_case : str = value elif weight_type == "bias": __snake_case : str = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __snake_case : List[Any] = getattr(_snake_case , _snake_case ) __snake_case : int = value else: __snake_case : List[Any] = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : int ) ->int: """simple docstring""" __snake_case : Optional[Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : Dict = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : List[str] = '''param''' if weight_type is not None and weight_type != "param": __snake_case : str = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __snake_case : Tuple = '''.'''.join([key, hf_param_name] ) else: __snake_case : Optional[int] = key __snake_case : List[Any] = value if '''lm_head''' in full_key else value[0] SCREAMING_SNAKE_CASE : Tuple = { """W_a""": """linear_1.weight""", """W_b""": """linear_2.weight""", """b_a""": """linear_1.bias""", """b_b""": """linear_2.bias""", """ln_W""": """norm.weight""", """ln_b""": """norm.bias""", } def lowercase ( _snake_case : str , _snake_case : List[Any] , _snake_case : Tuple=None , _snake_case : int=None ) ->Dict: """simple docstring""" __snake_case : Tuple = False for key, mapped_key in MAPPING.items(): __snake_case : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __snake_case : int = True if "*" in mapped_key: __snake_case : List[Any] = name.split(_snake_case )[0].split('''.''' )[-2] __snake_case : Tuple = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: __snake_case : Union[str, Any] = '''weight_g''' elif "weight_v" in name: __snake_case : List[str] = '''weight_v''' elif "bias" in name: __snake_case : Any = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __snake_case : List[Any] = '''weight''' else: __snake_case : Union[str, Any] = None if hf_dict is not None: rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) else: set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) return is_used return is_used def lowercase ( _snake_case : str , _snake_case : Dict , _snake_case : List[str] ) ->Any: """simple docstring""" __snake_case : Union[str, Any] = [] __snake_case : Union[str, Any] = fairseq_model.state_dict() __snake_case : str = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __snake_case : str = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) __snake_case : Union[str, Any] = True else: __snake_case : Optional[Any] = load_wavaveca_layer(_snake_case , _snake_case , _snake_case ) if not is_used: unused_weights.append(_snake_case ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowercase ( _snake_case : Any , 
_snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : List[str] ) ->Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = full_name.split('''conv_layers.''' )[-1] __snake_case : str = name.split('''.''' ) __snake_case : Optional[int] = int(items[0] ) __snake_case : Any = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __snake_case : int = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __snake_case : List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_snake_case ) @torch.no_grad() def lowercase ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any=None , _snake_case : str=None , _snake_case : List[Any]=True , _snake_case : int=False ) ->Dict: """simple docstring""" if config_path is not None: __snake_case : Optional[Any] = WavaVecaConfig.from_pretrained(_snake_case ) else: __snake_case : Tuple = WavaVecaConfig() if is_seq_class: __snake_case : Optional[int] = read_txt_into_dict(_snake_case ) __snake_case : List[Any] = idalabel __snake_case : int = WavaVecaForSequenceClassification(_snake_case ) __snake_case : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) feature_extractor.save_pretrained(_snake_case ) elif is_finetuned: if dict_path: __snake_case : int = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : Tuple = target_dict.pad_index __snake_case : int = target_dict.bos_index __snake_case : Tuple = target_dict.eos_index __snake_case : Optional[Any] = len(target_dict.symbols ) __snake_case : Any = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) __snake_case : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched __snake_case : Dict = 0 __snake_case : 
List[Any] = 1 with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(_snake_case , _snake_case ) __snake_case : List[Any] = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , ) __snake_case : Tuple = True if config.feat_extract_norm == '''layer''' else False __snake_case : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) __snake_case : Tuple = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) __snake_case : Optional[int] = WavaVecaForCTC(_snake_case ) else: __snake_case : Tuple = WavaVecaForPreTraining(_snake_case ) if is_finetuned or is_seq_class: __snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __snake_case : Dict = argparse.Namespace(task='''audio_pretraining''' ) __snake_case : Optional[int] = fairseq.tasks.setup_task(_snake_case ) __snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case ) __snake_case : int = model[0].eval() recursively_load_weights(_snake_case , _snake_case , not is_finetuned ) hf_wavavec.save_pretrained(_snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) parser.add_argument( """--is_seq_class""", action="""store_true""", help="""Whether the model to convert is a fine-tuned sequence classification model or not""", ) SCREAMING_SNAKE_CASE : Any = parser.parse_args() SCREAMING_SNAKE_CASE : Tuple = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
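Note that the identifiers in the script above are corrupted renderings of the real transformers names (WavaVecaConfig is Wav2Vec2Config, WavaVecaForCTC is Wav2Vec2ForCTC, and so on). A short consumer-side sketch of the fine-tuned branch, with the dump folder path standing in for --pytorch_dump_folder_path:

from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("./wav2vec2-converted")  # placeholder path
model = Wav2Vec2ForCTC.from_pretrained("./wav2vec2-converted")
model.eval()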
24
0
"""simple docstring""" import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: SCREAMING_SNAKE_CASE : List[Any] = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=None , a_=True , a_=True , a_=None , ): '''simple docstring''' __snake_case : int = size if size is not None else {'height': 20, 'width': 20} __snake_case : List[Any] = parent __snake_case : List[Any] = batch_size __snake_case : List[Any] = num_channels __snake_case : str = image_size __snake_case : Optional[Any] = min_resolution __snake_case : str = max_resolution __snake_case : List[Any] = size __snake_case : int = do_normalize __snake_case : Any = do_convert_rgb __snake_case : Tuple = [5_12, 10_24, 20_48, 40_96] __snake_case : Tuple = patch_size if patch_size is not None else {'height': 16, 'width': 16} def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg' __snake_case : Any = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('''RGB''' ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11, reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.', ) @require_torch @require_vision class _UpperCAmelCase ( _UpperCamelCase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =PixaStructImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = PixaStructImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(_UpperCAmelCase , '''do_convert_rgb''' ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.image_processor_tester.prepare_dummy_image() __snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict ) __snake_case : List[str] = 20_48 __snake_case : Dict = image_processor(_UpperCAmelCase , return_tensors='''pt''' , max_patches=_UpperCAmelCase ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input __snake_case : Dict = ( 
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input __snake_case : Union[str, Any] = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __snake_case : Union[str, Any] = image_processor( _UpperCAmelCase , return_tensors='''pt''' , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input __snake_case : Dict = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 __snake_case : List[str] = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(_UpperCAmelCase ): __snake_case : str = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_UpperCAmelCase ).flattened_patches __snake_case : Union[str, Any] = 'Hello' __snake_case : str = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __snake_case : Union[str, Any] = image_processor( _UpperCAmelCase , return_tensors='''pt''' , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , np.ndarray ) __snake_case : Any = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input __snake_case : Dict = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __snake_case : str = image_processor( _UpperCAmelCase , return_tensors='''pt''' , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case : List[Any] = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , torch.Tensor ) # Test not batched input __snake_case : Union[str, Any] = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input __snake_case : Optional[int] = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __snake_case : Union[str, Any] = image_processor( _UpperCAmelCase , return_tensors='''pt''' , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11, reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.', ) @require_torch @require_vision class _UpperCAmelCase ( _UpperCamelCase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =PixaStructImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = PixaStructImageProcessingTester(self , num_channels=4 ) __snake_case : Tuple = 3 @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(_UpperCAmelCase , '''do_convert_rgb''' ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input __snake_case : int = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input __snake_case : Optional[int] = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __snake_case : Optional[Any] = image_processor( _UpperCAmelCase , return_tensors='''pt''' , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
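The expected_hidden_dim these tests assert is easy to verify by hand: each flattened patch carries patch_height * patch_width * num_channels pixel values, plus two extra slots for the patch's row and column index. With the tester's defaults:

patch_size = {"height": 16, "width": 16}
num_channels = 3
expected_hidden_dim = patch_size["height"] * patch_size["width"] * num_channels + 2
print(expected_hidden_dim)  # 770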
368
"""simple docstring""" from ..utils import DummyObject, requires_backends class _UpperCAmelCase ( metaclass=__snake_case ): '''simple docstring''' lowerCamelCase__ =['transformers', 'torch', 'note_seq'] def __init__(self , *a_ , **a_ ): '''simple docstring''' requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ): '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
24
0
"""simple docstring""" import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : str=0 ) ->Any: """simple docstring""" if name is None: __snake_case : str = None else: __snake_case : Optional[Any] = "." * max(0 , spaces - 2 ) + "# {:" + str(50 - spaces ) + "s}" __snake_case : Dict = fmt.format(__SCREAMING_SNAKE_CASE ) # Print and recurse (if needed). if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): if msg is not None: print(__SCREAMING_SNAKE_CASE ) for k in val.keys(): recursive_print(__SCREAMING_SNAKE_CASE , val[k] , spaces + 2 ) elif isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor ): print(__SCREAMING_SNAKE_CASE , ''':''' , val.size() ) else: print(__SCREAMING_SNAKE_CASE , ''':''' , __SCREAMING_SNAKE_CASE ) def lowercase ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : int , _snake_case : List[str] ) ->Tuple: """simple docstring""" __snake_case : Union[str, Any] = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] __snake_case : Union[str, Any] = (num_heads, hidden_size, num_splits) + input_shape[1:] __snake_case : Optional[int] = param.view(*__SCREAMING_SNAKE_CASE ) __snake_case : Optional[int] = param.transpose(0 , 2 ) __snake_case : Tuple = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] __snake_case : List[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:] __snake_case : int = param.view(*__SCREAMING_SNAKE_CASE ) __snake_case : str = param.transpose(0 , 1 ).contiguous() __snake_case : Optional[Any] = param.view(*__SCREAMING_SNAKE_CASE ) return param def lowercase ( _snake_case : int , _snake_case : Dict , _snake_case : str ) ->Optional[Any]: """simple docstring""" __snake_case : Union[str, Any] = {} # old versions did not store training args __snake_case : str = input_state_dict.get('''args''' , __SCREAMING_SNAKE_CASE ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) __snake_case : Union[str, Any] = ds_args.padded_vocab_size __snake_case : Dict = ds_args.max_position_embeddings __snake_case : Dict = ds_args.hidden_size __snake_case : Tuple = ds_args.num_layers __snake_case : Any = ds_args.num_attention_heads __snake_case : List[str] = ds_args.ffn_hidden_size # pprint(config) # The number of heads. __snake_case : List[str] = config.n_head # The hidden_size per head. __snake_case : Any = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): __snake_case : List[Any] = input_state_dict["checkpoint_version"] else: __snake_case : Union[str, Any] = 0.0 # The model. __snake_case : Union[str, Any] = input_state_dict["model"] # The language model. __snake_case : Any = model["language_model"] # The embeddings. __snake_case : Tuple = lm["embedding"] # The word embeddings. __snake_case : Optional[int] = embeddings["word_embeddings"]["weight"] # Truncate the embedding table to vocab_size rows. __snake_case : List[Any] = word_embeddings[: config.vocab_size, :] __snake_case : Any = word_embeddings # The position embeddings. __snake_case : str = embeddings["position_embeddings"]["weight"] # Read the causal mask dimension (seqlen). 
[max_sequence_length, hidden_size] __snake_case : Tuple = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( f"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" ) # Store the position embeddings. __snake_case : List[str] = pos_embeddings # The transformer. __snake_case : str = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"] # The regex to extract layer names. __snake_case : Any = re.compile(r'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' ) # The simple map of names for "automated" rules. __snake_case : Tuple = { "attention.dense": ".attn.c_proj.", "self_attention.dense": ".attn.c_proj.", "mlp.dense_h_to_4h": ".mlp.c_fc.", "mlp.dense_4h_to_h": ".mlp.c_proj.", } # Extract the layers. for key, val in transformer.items(): # Match the name. __snake_case : Union[str, Any] = layer_re.match(__SCREAMING_SNAKE_CASE ) # Stop if that's not a layer if m is None: break # The index of the layer. __snake_case : Optional[int] = int(m.group(1 ) ) # The name of the operation. __snake_case : int = m.group(2 ) # Is it a weight or a bias? __snake_case : int = m.group(3 ) # The name of the layer. __snake_case : List[str] = f"""transformer.h.{layer_idx}""" # For layernorm(s), simply store the layer norm. if op_name.endswith('''layernorm''' ): __snake_case : Union[str, Any] = "ln_1" if op_name.startswith('''input''' ) else "ln_2" __snake_case : int = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. __snake_case : List[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __snake_case : List[Any] = causal_mask # Insert a "dummy" tensor for masked_bias. __snake_case : Optional[Any] = torch.tensor(-1e4 , dtype=torch.floataa ) __snake_case : Any = masked_bias __snake_case : Optional[Any] = fix_query_key_value_ordering(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 3 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. __snake_case : str = out_val.transpose(0 , 1 ).contiguous() # Store. __snake_case : Dict = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": __snake_case : Union[str, Any] = fix_query_key_value_ordering(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 3 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Store. No change of shape. __snake_case : Optional[int] = out_val # Transpose the weights. elif weight_or_bias == "weight": __snake_case : List[str] = megatron_to_transformers[op_name] __snake_case : List[Any] = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": __snake_case : str = megatron_to_transformers[op_name] __snake_case : List[str] = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. __snake_case : Tuple = transformer["final_layernorm.weight"] __snake_case : Union[str, Any] = transformer["final_layernorm.bias"] # For LM head, transformers' wants the matrix to weight embeddings. __snake_case : List[str] = word_embeddings # It should be done! 
return output_state_dict def lowercase ( ) ->Tuple: """simple docstring""" __snake_case : List[str] = argparse.ArgumentParser() parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''' ) parser.add_argument( '''path_to_checkpoint''' , type=__SCREAMING_SNAKE_CASE , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , ) parser.add_argument( '''--config_file''' , default='''''' , type=__SCREAMING_SNAKE_CASE , help='''An optional config json file describing the pre-trained model.''' , ) __snake_case : Tuple = parser.parse_args() # Extract the basename. __snake_case : Optional[int] = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(f"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" ) if args.path_to_checkpoint.endswith('''.zip''' ): with zipfile.ZipFile(args.path_to_checkpoint , '''r''' ) as checkpoint: with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict: __snake_case : List[str] = torch.load(__SCREAMING_SNAKE_CASE , map_location='''cpu''' ) else: __snake_case : Dict = torch.load(args.path_to_checkpoint , map_location='''cpu''' ) __snake_case : Any = input_state_dict.get('''args''' , __SCREAMING_SNAKE_CASE ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: __snake_case : Union[str, Any] = "gelu_fast" elif ds_args.openai_gelu: __snake_case : int = "gelu_new" else: __snake_case : Dict = "gelu" else: # in the very early days this used to be "gelu_new" __snake_case : Union[str, Any] = "gelu_new" # Spell out all parameters in case the defaults change. __snake_case : List[Any] = GPTaConfig( vocab_size=50_257 , n_positions=1_024 , n_embd=1_024 , n_layer=24 , n_head=16 , n_inner=4_096 , activation_function=__SCREAMING_SNAKE_CASE , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='''cls_index''' , summary_use_proj=__SCREAMING_SNAKE_CASE , summary_activation=__SCREAMING_SNAKE_CASE , summary_proj_to_labels=__SCREAMING_SNAKE_CASE , summary_first_dropout=0.1 , scale_attn_weights=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE , bos_token_id=50_256 , eos_token_id=50_256 , ) else: __snake_case : Union[str, Any] = GPTaConfig.from_json_file(args.config_file ) __snake_case : str = ["GPT2LMHeadModel"] # Convert. print('''Converting''' ) __snake_case : str = convert_megatron_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: __snake_case : Tuple = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": __snake_case : List[Any] = "gpt2" elif tokenizer_type == "PretrainedFromHF": __snake_case : Any = ds_args.tokenizer_name_or_path else: raise ValueError(f"""Unrecognized tokenizer_type {tokenizer_type}""" ) else: __snake_case : Dict = "gpt2" __snake_case : List[Any] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE ) __snake_case : Union[str, Any] = type(__SCREAMING_SNAKE_CASE ).__name__ __snake_case : Dict = tokenizer_class # Store the config to file. 
print('''Saving config''' ) config.save_pretrained(__SCREAMING_SNAKE_CASE ) # Save tokenizer based on args print(f"""Adding {tokenizer_class} tokenizer files""" ) tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE ) # Store the state_dict to file. __snake_case : Any = os.path.join(__SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) print(f"""Saving checkpoint to \"{output_checkpoint_file}\"""" ) torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
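The trickiest step in the conversion above is fix_query_key_value_ordering. A tiny self-check of its checkpoint_version >= 2.0 branch with made-up sizes (not Megatron's real dimensions) shows what the regrouping does:

import torch

num_heads, head_dim, hidden = 4, 8, 32  # illustrative sizes
num_splits = 3                          # query, key, value

param = torch.randn(num_heads * num_splits * head_dim, hidden)

# [heads * splits * dim, hidden] -> [heads, splits, dim, hidden] -> swap the first
# two axes -> flatten back: QKV rows are now grouped by split instead of by head.
saved = param.view(num_heads, num_splits, head_dim, hidden)
out = saved.transpose(0, 1).contiguous().view(num_heads * num_splits * head_dim, hidden)
print(out.shape)  # torch.Size([96, 32])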
369
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , ): '''simple docstring''' __snake_case : List[Any] = size if size is not None else {'''shortest_edge''': 20} __snake_case : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __snake_case : Tuple = parent __snake_case : Tuple = batch_size __snake_case : Tuple = num_channels __snake_case : List[str] = image_size __snake_case : Optional[Any] = min_resolution __snake_case : List[Any] = max_resolution __snake_case : List[Any] = do_resize __snake_case : Dict = size __snake_case : Dict = do_center_crop __snake_case : Dict = crop_size __snake_case : str = do_flip_channel_order def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class _UpperCAmelCase ( __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = MobileViTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , '''do_resize''' ) ) self.assertTrue(hasattr(a_ , '''size''' ) ) self.assertTrue(hasattr(a_ , '''do_center_crop''' ) ) self.assertTrue(hasattr(a_ , '''center_crop''' ) ) self.assertTrue(hasattr(a_ , '''do_flip_channel_order''' ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], 
self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : str = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input __snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Union[str, Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input __snake_case : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
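A minimal usage sketch of the processor these tests exercise (the sizes mirror the tester's defaults; the input image is synthetic):

import numpy as np
from PIL import Image
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
image = Image.fromarray(np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8))

pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18]) after resize + center crop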
24
0
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def lowercase ( _snake_case : Optional[int] , _snake_case : List[str] , _snake_case : Dict=None ) ->Optional[Any]: """simple docstring""" assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match""" __snake_case : Optional[Any] = nn.Parameter(__lowerCAmelCase ) if bias is not None: assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match""" __snake_case : str = nn.Parameter(__lowerCAmelCase ) def lowercase ( _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : List[Any] ) ->Any: """simple docstring""" __snake_case : Optional[int] = np.asarray(weights[0] ) __snake_case : str = np.asarray(weights[1] ) __snake_case : List[Any] = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(__lowerCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , __lowerCAmelCase ) , ) set_param( torch_layer.self_attention.value , torch.tensor(__lowerCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , __lowerCAmelCase ) , ) set_param( torch_layer.output.dense , torch.tensor(__lowerCAmelCase ).view(-1 , __lowerCAmelCase ).contiguous().transpose(0 , 1 ) , ) def lowercase ( _snake_case : Tuple , _snake_case : Any , _snake_case : List[str] ) ->Any: """simple docstring""" __snake_case : Union[str, Any] = np.asarray(weights[0] ) __snake_case : Optional[int] = np.asarray(weights[1] ) __snake_case : List[Any] = np.asarray(weights[2] ) __snake_case : int = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(__lowerCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , __lowerCAmelCase ) , ) set_param( torch_layer.self_attention.key , torch.tensor(__lowerCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , __lowerCAmelCase ) , ) set_param( torch_layer.self_attention.value , torch.tensor(__lowerCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , __lowerCAmelCase ) , ) set_param( torch_layer.output.dense , torch.tensor(__lowerCAmelCase ).view(-1 , __lowerCAmelCase ).contiguous().transpose(0 , 1 ) , ) def lowercase ( _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : List[Any] ) ->int: """simple docstring""" __snake_case : Optional[int] = weights[0][0][0] __snake_case : List[Any] = np.asarray(layer_norm_a[0] ) __snake_case : str = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(__lowerCAmelCase ) , torch.tensor(__lowerCAmelCase ) , ) # lsh weights + output __snake_case : Any = weights[0][1] if len(__lowerCAmelCase ) < 4: set_layer_weights_in_torch_lsh(__lowerCAmelCase , torch_block.attention , __lowerCAmelCase ) else: set_layer_weights_in_torch_local(__lowerCAmelCase , torch_block.attention , __lowerCAmelCase ) # intermediate weighs __snake_case : Union[str, Any] = weights[2][0][1][2] # Chunked Feed Forward if len(__lowerCAmelCase ) == 4: __snake_case : Dict = intermediate_weights[2] # layernorm 2 __snake_case : Any = np.asarray(intermediate_weights[0][0] ) __snake_case : List[Any] = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(__lowerCAmelCase ) , torch.tensor(__lowerCAmelCase ) , ) # intermediate dense __snake_case : Dict = np.asarray(intermediate_weights[1][0] ) __snake_case : str = np.asarray(intermediate_weights[1][1] ) set_param( 
torch_block.feed_forward.dense.dense , torch.tensor(__lowerCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowerCAmelCase ) , ) # intermediate out __snake_case : Dict = np.asarray(intermediate_weights[4][0] ) __snake_case : int = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(__lowerCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowerCAmelCase ) , ) def lowercase ( _snake_case : Dict , _snake_case : str , _snake_case : str ) ->Union[str, Any]: """simple docstring""" __snake_case : Optional[int] = torch_model.reformer # word embeds __snake_case : Union[str, Any] = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(__lowerCAmelCase ) , ) if isinstance(weights[3] , __lowerCAmelCase ): __snake_case : List[Any] = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): __snake_case : Any = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), f"""{position_embeddings[emb_idx]} emb does not match""" __snake_case : Dict = nn.Parameter(torch.tensor(__lowerCAmelCase ) ) __snake_case : List[Any] = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( __lowerCAmelCase ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): __snake_case : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # output layer norm __snake_case : List[str] = np.asarray(weights[7][0] ) __snake_case : Optional[int] = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(__lowerCAmelCase ) , torch.tensor(__lowerCAmelCase ) , ) # output embeddings __snake_case : str = np.asarray(weights[9][0] ) __snake_case : str = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(__lowerCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowerCAmelCase ) , ) def lowercase ( _snake_case : List[str] , _snake_case : int , _snake_case : int ) ->int: """simple docstring""" __snake_case : Optional[int] = ReformerConfig.from_json_file(__lowerCAmelCase ) print(f"""Building PyTorch model from configuration: {config}""" ) __snake_case : Optional[int] = ReformerModelWithLMHead(__lowerCAmelCase ) with open(__lowerCAmelCase , '''rb''' ) as f: __snake_case : Optional[Any] = pickle.load(__lowerCAmelCase )['''weights'''] set_model_weights_in_torch(__lowerCAmelCase , __lowerCAmelCase , config.hidden_size ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , __lowerCAmelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( """--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained Reformer model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
"""simple docstring""" import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def lowercase ( ) ->Optional[int]: """simple docstring""" __snake_case : int = torch.nn.Linear(2 , 4 ) __snake_case : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 ) __snake_case : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 ) __snake_case : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) __snake_case : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def lowercase ( _snake_case : str ) ->Optional[Any]: """simple docstring""" return (model.weight.abs().sum() + model.bias.abs().sum()).item() def lowercase ( _snake_case : Union[str, Any] ) ->Tuple: """simple docstring""" __snake_case : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(_snake_case ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(a_ ): __snake_case : Any = Accelerator(cpu=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case : Optional[int] = GradientState() assert state.num_steps == 1 __snake_case : str = 4 assert state.num_steps == 4 assert state.sync_gradients is True __snake_case : List[Any] = False assert state.sync_gradients is False GradientState._reset_state() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*a_ , **a_ ): pass with patch('''torch.cuda.set_device''' , a_ ), 
patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ): __snake_case : List[Any] = Accelerator() self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : Any = get_signature(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # make sure loaded weights match accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : List[Any] = get_signature(a_ ) # saving hook def save_config(a_ , a_ , a_ ): __snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__} with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f: json.dump(a_ , a_ ) # loading hook def load_config(a_ , a_ ): with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f: __snake_case : Any = json.load(a_ ) __snake_case : List[str] = config['''class_name'''] __snake_case : str = accelerator.register_save_state_pre_hook(a_ ) __snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Any = '''random''' # make sure loaded weights match with hooks accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks removed load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Union[str, Any] = '''random''' # make sure loaded weights match with hooks removed accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components() __snake_case : Union[str, Any] = None # This should work __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertTrue(dummy_obj is None ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() __snake_case : Optional[int] = [1, 2, 3] # This should work __snake_case , __snake_case , 
__snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map={'''''': 0} , ) __snake_case : Optional[Any] = Accelerator() # This should work __snake_case : Any = accelerator.prepare(a_ ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Any = Accelerator() with init_empty_weights(): __snake_case : List[str] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : Union[str, Any] = infer_auto_device_map(a_ ) __snake_case : str = '''cpu''' __snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ ) # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Dict = accelerator.prepare(a_ ) @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU} with init_empty_weights(): __snake_case : Any = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : List[Any] = infer_auto_device_map(a_ ) __snake_case : Dict = 1 __snake_case : str = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Any = Accelerator() # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Tuple = accelerator.prepare(a_ ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM with init_empty_weights(): __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) __snake_case : Tuple = infer_auto_device_map(a_ ) __snake_case : Tuple = 1 __snake_case : List[Any] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Tuple = Accelerator() # This should work __snake_case : Dict = accelerator.prepare(a_ ) @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = torch.nn.Linear(10 , 10 ) __snake_case 
: List[str] = torch.optim.SGD(model.parameters() , lr=0.01 ) __snake_case : Optional[Any] = Accelerator(cpu=a_ ) __snake_case : str = accelerator.prepare(a_ )
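# The pattern these tests exercise, as a runnable CPU-only sketch: Accelerator.prepare
# wraps each object and registers it on the accelerator, which is what the assertions
# on accelerator._models / _optimizers / _dataloaders above rely on.
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator(cpu=True)
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
train_dl = DataLoader(TensorDataset(torch.randn(8, 2)))
model, optimizer, train_dl = accelerator.prepare(model, optimizer, train_dl)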
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int SCREAMING_SNAKE_CASE : List[Any] = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class _UpperCAmelCase ( datasets.BuilderConfig ): '''simple docstring''' lowerCamelCase__ =None def lowercase ( _snake_case : Union[str, Any] , _snake_case : str , ) ->Dict: """simple docstring""" import pyspark def generate_fn(): __snake_case : List[str] = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) ) for partition_id in partition_order: __snake_case : List[str] = df_with_partition_id.select('''*''' ).where(f"""part_id = {partition_id}""" ).drop('''part_id''' ) __snake_case : Any = partition_df.collect() __snake_case : Optional[int] = 0 for row in rows: yield f"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class _UpperCAmelCase ( _BaseExamplesIterable ): '''simple docstring''' def __init__(self , a_ , a_=None , ): '''simple docstring''' __snake_case : Dict = df __snake_case : Dict = partition_order or range(self.df.rdd.getNumPartitions() ) __snake_case : Optional[int] = _generate_iterable_examples(self.df , self.partition_order ) def __iter__(self ): '''simple docstring''' yield from self.generate_examples_fn() def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(A_ ) return SparkExamplesIterable(self.df , partition_order=A_ ) def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = self.split_shard_indices_by_worker(A_ , A_ ) return SparkExamplesIterable(self.df , partition_order=A_ ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return len(self.partition_order ) class _UpperCAmelCase ( datasets.DatasetBuilder ): '''simple docstring''' lowerCamelCase__ =SparkConfig def __init__(self , a_ , a_ = None , a_ = None , **a_ , ): '''simple docstring''' import pyspark __snake_case : Tuple = pyspark.sql.SparkSession.builder.getOrCreate() __snake_case : Union[str, Any] = df __snake_case : Optional[int] = working_dir super().__init__( cache_dir=A_ , config_name=str(self.df.semanticHash() ) , **A_ , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' def create_cache_and_write_probe(a_ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=A_ ) __snake_case : Optional[Any] = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(A_ , '''a''' ) return [probe_file] if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: __snake_case : int = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(A_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' import pyspark def get_arrow_batch_size(a_ ): for batch in it: yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} ) __snake_case : str = self.df.count() __snake_case : List[str] = df_num_rows if df_num_rows <= 1_00 else 1_00 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. __snake_case : Union[str, Any] = ( self.df.limit(A_ ) .repartition(1 ) .mapInArrow(A_ , '''batch_bytes: long''' ) .agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) ) .collect()[0] .sample_bytes / sample_num_rows ) __snake_case : List[Any] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. __snake_case : List[Any] = min(A_ , int(approx_total_size / max_shard_size ) ) __snake_case : Union[str, Any] = self.df.repartition(A_ ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , ): '''simple docstring''' import pyspark __snake_case : str = ParquetWriter if file_format == '''parquet''' else ArrowWriter __snake_case : Union[str, Any] = os.path.join(self._working_dir , os.path.basename(A_ ) ) if self._working_dir else fpath __snake_case : Optional[Any] = file_format == '''parquet''' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. __snake_case : Optional[int] = self.config.features __snake_case : Dict = self._writer_batch_size __snake_case : str = self._fs.storage_options def write_arrow(a_ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. __snake_case : List[Any] = pyspark.TaskContext().taskAttemptId() __snake_case : Optional[Any] = next(A_ , A_ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) __snake_case : Dict = 0 __snake_case : Tuple = writer_class( features=A_ , path=working_fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , writer_batch_size=A_ , storage_options=A_ , embed_local_files=A_ , ) __snake_case : List[str] = pa.Table.from_batches([first_batch] ) writer.write_table(A_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: __snake_case , __snake_case : Optional[Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) shard_id += 1 __snake_case : Union[str, Any] = writer_class( features=writer._features , path=working_fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , writer_batch_size=A_ , storage_options=A_ , embed_local_files=A_ , ) __snake_case : Dict = pa.Table.from_batches([batch] ) writer.write_table(A_ ) if writer._num_bytes > 0: __snake_case , __snake_case : Any = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(A_ ) ): __snake_case : List[Any] = os.path.join(os.path.dirname(A_ ) , os.path.basename(A_ ) ) shutil.move(A_ , A_ ) __snake_case : int = ( self.df.mapInArrow(A_ , '''task_id: long, num_examples: long, num_bytes: long''' ) .groupBy('''task_id''' ) .agg( pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def SCREAMING_SNAKE_CASE (self , a_ , a_ = "arrow" , a_ = None , a_ = None , **a_ , ): '''simple docstring''' self._validate_cache_dir() __snake_case : Dict = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(A_ ) __snake_case : str = not is_remote_filesystem(self._fs ) __snake_case : Optional[int] = os.path.join if is_local else posixpath.join __snake_case : int = '''-TTTTT-SSSSS-of-NNNNN''' __snake_case : Optional[int] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" __snake_case : Any = path_join(self._output_dir , A_ ) __snake_case : Union[str, Any] = 0 __snake_case : Optional[Any] = 0 __snake_case : Optional[int] = 0 __snake_case : List[str] = [] __snake_case : Tuple = [] for task_id, content in self._prepare_split_single(A_ , A_ , A_ ): ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : List[str] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(A_ ) __snake_case : Dict = total_num_examples __snake_case : Dict = total_num_bytes # should rename everything at the end logger.debug(f"""Renaming {total_shards} shards.""" ) if total_shards > 1: __snake_case : Dict = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, 
which will result in a # pickling error due to pickling the SparkContext. __snake_case : Dict = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( a_ , a_ , a_ , ): rename( A_ , fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , fpath.replace('''TTTTT-SSSSS''' , f"""{global_shard_id:05d}""" ).replace('''NNNNN''' , f"""{total_shards:05d}""" ) , ) __snake_case : List[str] = [] __snake_case : List[str] = 0 for i in range(len(A_ ) ): __snake_case , __snake_case : str = task_id_and_num_shards[i] for shard_id in range(A_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(A_ , len(A_ ) ).map(lambda a_ : _rename_shard(*A_ ) ).collect() else: # don't use any pattern __snake_case : Optional[int] = 0 __snake_case : List[str] = task_id_and_num_shards[0][0] self._rename( fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , fpath.replace(A_ , '''''' ) , ) def SCREAMING_SNAKE_CASE (self , a_ , ): '''simple docstring''' return SparkExamplesIterable(self.df )
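# The builder above backs `Dataset.from_spark`; a minimal usage sketch assuming a
# local Spark session (the column names and rows here are illustrative only):
from pyspark.sql import SparkSession

from datasets import Dataset

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([("a", 0), ("b", 1)], schema="text string, label int")
ds = Dataset.from_spark(df)  # materializes the DataFrame as an Arrow-backed Dataset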
"""simple docstring""" def lowercase ( _snake_case : int ) ->str: """simple docstring""" if number > 0: raise ValueError('''input must be a negative integer''' ) __snake_case : Any = len(bin(_snake_case )[3:] ) __snake_case : List[Any] = bin(abs(_snake_case ) - (1 << binary_number_length) )[3:] __snake_case : Dict = ( ( '''1''' + '''0''' * (binary_number_length - len(_snake_case )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse from .config import config_command_parser from .config_args import default_config_file, load_config_from_file # noqa: F401 from .default import default_command_parser from .update import update_command_parser def lowercase ( _snake_case : Dict=None ) ->int: """simple docstring""" __snake_case : str = argparse.ArgumentParser(add_help=_snake_case , allow_abbrev=_snake_case ) # The main config parser __snake_case : Tuple = config_command_parser(_snake_case ) # The subparser to add commands to __snake_case : List[str] = config_parser.add_subparsers(title='''subcommands''' , dest='''subcommand''' ) # Then add other parsers with the parent parser default_command_parser(_snake_case , parents=[parent_parser] ) update_command_parser(_snake_case , parents=[parent_parser] ) return config_parser def lowercase ( ) ->Tuple: """simple docstring""" __snake_case : Dict = get_config_parser() __snake_case : Any = config_parser.parse_args() if not hasattr(_snake_case , '''func''' ): config_parser.print_help() exit(1 ) # Run args.func(_snake_case ) if __name__ == "__main__": main()
"""simple docstring""" def lowercase ( ) ->int: """simple docstring""" return [ a * b * (1_000 - a - b) for a in range(1 , 999 ) for b in range(_snake_case , 999 ) if (a * a + b * b == (1_000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'{solution() = }')
"""simple docstring""" import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py SCREAMING_SNAKE_CASE : Optional[Any] = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. SCREAMING_SNAKE_CASE : str = direct_transformers_import(PATH_TO_TRANSFORMERS) SCREAMING_SNAKE_CASE : Dict = transformers.models.auto.configuration_auto.CONFIG_MAPPING SCREAMING_SNAKE_CASE : int = { # used to compute the property `self.chunk_length` """EncodecConfig""": ["""overlap"""], # used as `self.bert_model = BertModel(config, ...)` """DPRConfig""": True, # not used in modeling files, but it's an important information """FSMTConfig""": ["""langs"""], # used internally in the configuration class file """GPTNeoConfig""": ["""attention_types"""], # used internally in the configuration class file """EsmConfig""": ["""is_folding_model"""], # used during training (despite we don't have training script for these models yet) """Mask2FormerConfig""": ["""ignore_value"""], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) """OneFormerConfig""": ["""ignore_value""", """norm"""], # used during preprocessing and collation, see `collating_graphormer.py` """GraphormerConfig""": ["""spatial_pos_max"""], # used internally in the configuration class file """T5Config""": ["""feed_forward_proj"""], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally """MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], """UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], # used internally in the configuration class file """LongT5Config""": ["""feed_forward_proj"""], # used internally in the configuration class file """SwitchTransformersConfig""": ["""feed_forward_proj"""], # having default values other than `1e-5` - we can't fix them without breaking """BioGptConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """GLPNConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """SegformerConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """CvtConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """PerceiverConfig""": ["""layer_norm_eps"""], # used internally to calculate the feature size """InformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate `mlp_dim` """SamVisionConfig""": ["""mlp_ratio"""], # For (head) training, but so far not implemented """ClapAudioConfig""": ["""num_classes"""], # Not used, but providing useful information to users """SpeechT5HifiGanConfig""": ["""sampling_rate"""], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block 
once we are sure SPECIAL_CASES_TO_ALLOW.update( { """CLIPSegConfig""": True, """DeformableDetrConfig""": True, """DetaConfig""": True, """DinatConfig""": True, """DonutSwinConfig""": True, """EfficientFormerConfig""": True, """FSMTConfig""": True, """JukeboxConfig""": True, """LayoutLMv2Config""": True, """MaskFormerSwinConfig""": True, """MT5Config""": True, """NatConfig""": True, """OneFormerConfig""": True, """PerceiverConfig""": True, """RagConfig""": True, """SpeechT5Config""": True, """SwinConfig""": True, """Swin2SRConfig""": True, """Swinv2Config""": True, """SwitchTransformersConfig""": True, """TableTransformerConfig""": True, """TapasConfig""": True, """TransfoXLConfig""": True, """UniSpeechConfig""": True, """UniSpeechSatConfig""": True, """WavLMConfig""": True, """WhisperConfig""": True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) """JukeboxPriorConfig""": True, # TODO: @Younes (for `is_decoder`) """Pix2StructTextConfig""": True, } ) def lowercase ( _snake_case : List[Any] , _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] ) ->Tuple: """simple docstring""" __snake_case : List[Any] = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( f"""config.{attribute}""" in modeling_source or f"""getattr(config, \"{attribute}\"""" in modeling_source or f"""getattr(self.config, \"{attribute}\"""" in modeling_source ): __snake_case : List[str] = True # Deal with multi-line cases elif ( re.search( rf"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , _snake_case , ) is not None ): __snake_case : Optional[Any] = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: __snake_case : List[str] = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files __snake_case : Optional[int] = [ '''bos_index''', '''eos_index''', '''pad_index''', '''unk_index''', '''mask_index''', '''image_size''', '''use_cache''', '''out_features''', '''out_indices''', ] __snake_case : Tuple = ['''encoder_no_repeat_ngram_size'''] # Special cases to be allowed __snake_case : Union[str, Any] = True if not attribute_used: __snake_case : List[Any] = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: __snake_case : str = True elif attribute in ["tie_word_embeddings"] and default_value is False: __snake_case : str = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: __snake_case : Optional[int] = True elif attribute.endswith('''_token_id''' ): __snake_case : Dict = True # configuration class specific cases if not case_allowed: __snake_case : Optional[Any] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) __snake_case : Tuple = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def lowercase ( _snake_case : str ) ->int: """simple docstring""" __snake_case : Union[str, Any] = dict(inspect.signature(config_class.__init__ 
).parameters ) __snake_case : List[Any] = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']] __snake_case : Optional[Any] = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass __snake_case : Any = {} if len(config_class.attribute_map ) > 0: __snake_case : Dict = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files __snake_case : Optional[int] = inspect.getsourcefile(_snake_case ) __snake_case : Union[str, Any] = os.path.dirname(_snake_case ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. __snake_case : Dict = [os.path.join(_snake_case , _snake_case ) for fn in os.listdir(_snake_case ) if fn.startswith('''modeling_''' )] # Get the source code strings __snake_case : str = [] for path in modeling_paths: if os.path.isfile(_snake_case ): with open(_snake_case ) as fp: modeling_sources.append(fp.read() ) __snake_case : Any = [] for config_param, default_value in zip(_snake_case , _snake_case ): # `attributes` here is all the variant names for `config_param` __snake_case : List[str] = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(_snake_case , _snake_case , _snake_case , _snake_case ): unused_attributes.append(attributes[0] ) return sorted(_snake_case ) def lowercase ( ) ->Any: """simple docstring""" __snake_case : Tuple = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) __snake_case : Union[str, Any] = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda _snake_case : inspect.isclass(_snake_case ) and issubclass(_snake_case , _snake_case ) and inspect.getmodule(_snake_case ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: __snake_case : str = check_config_attributes_being_used(_snake_case ) if len(_snake_case ) > 0: __snake_case : Optional[int] = unused_attributes if len(_snake_case ) > 0: __snake_case : List[Any] = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n''' for name, attributes in configs_with_unused_attributes.items(): error += f"""{name}: {attributes}\n""" raise ValueError(_snake_case ) if __name__ == "__main__": check_config_attributes()
351
"""simple docstring""" def lowercase ( _snake_case : int = 100 ) ->int: """simple docstring""" __snake_case : str = n * (n + 1) * (2 * n + 1) / 6 __snake_case : Dict = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F'{solution() = }')
"""simple docstring""" import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class _UpperCAmelCase : '''simple docstring''' @staticmethod def SCREAMING_SNAKE_CASE (*a_ , **a_ ): '''simple docstring''' pass def lowercase ( _snake_case : Tuple ) ->Union[str, Any]: """simple docstring""" return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. SCREAMING_SNAKE_CASE : Union[str, Any] = ( """https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png""" ) @is_pipeline_test @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = pipeline( '''document-question-answering''' , model=a_ , tokenizer=a_ , image_processor=a_ ) __snake_case : Any = INVOICE_URL __snake_case : List[str] = list(zip(*apply_tesseract(load_image(a_ ) , a_ , '''''' ) ) ) __snake_case : Any = '''What is the placebo?''' __snake_case : Any = [ { '''image''': load_image(a_ ), '''question''': question, }, { '''image''': image, '''question''': question, }, { '''image''': image, '''question''': question, '''word_boxes''': word_boxes, }, ] return dqa_pipeline, examples def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : Optional[Any] = dqa_pipeline(a_ , top_k=2 ) self.assertEqual( a_ , [ [ {'''score''': ANY(a_ ), '''answer''': ANY(a_ ), '''start''': ANY(a_ ), '''end''': ANY(a_ )}, {'''score''': ANY(a_ ), '''answer''': ANY(a_ ), '''start''': ANY(a_ ), '''end''': ANY(a_ )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' ) __snake_case : Union[str, Any] = INVOICE_URL __snake_case : Tuple = '''How many cats are there?''' __snake_case : Tuple = [ {'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39}, {'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40}, ] __snake_case : Union[str, Any] = dqa_pipeline(image=a_ , question=a_ , top_k=2 ) self.assertEqual(nested_simplify(a_ , decimals=4 ) , a_ ) __snake_case : int = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual(nested_simplify(a_ , decimals=4 ) , a_ ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably __snake_case : List[Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' __snake_case : List[Any] = dqa_pipeline(image=a_ , question=a_ , top_k=2 ) self.assertEqual(a_ , [] ) # We can optionnally pass directly the words and bounding boxes __snake_case : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' __snake_case : Union[str, Any] = [] __snake_case : int = [] __snake_case : List[str] = dqa_pipeline(image=a_ , question=a_ , words=a_ , boxes=a_ , top_k=2 ) self.assertEqual(a_ , [] ) @slow @require_torch @require_detectrona @require_pytesseract def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = pipeline( '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , ) __snake_case : str = INVOICE_URL __snake_case : Any = '''What is the invoice number?''' __snake_case : Union[str, Any] = dqa_pipeline(image=a_ , question=a_ , top_k=2 ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case : Union[str, Any] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case : Dict = dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ [ {'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = pipeline( '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , ) __snake_case : str = INVOICE_URL __snake_case : List[str] = '''What is the invoice number?''' __snake_case : Tuple = dqa_pipeline(image=a_ , question=a_ , top_k=2 ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, {'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case : str = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, {'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case : Optional[int] = dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ [ {'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, {'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = AutoTokenizer.from_pretrained( 
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a_ ) __snake_case : int = pipeline( '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a_ , revision='''3dc6de3''' , ) __snake_case : int = INVOICE_URL __snake_case : List[str] = '''What is the invoice number?''' __snake_case : Union[str, Any] = dqa_pipeline(image=a_ , question=a_ , top_k=2 ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] , ) __snake_case : Tuple = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] , ) __snake_case : Tuple = dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ [ {'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] ] * 2 , ) __snake_case : Union[str, Any] = list(zip(*apply_tesseract(load_image(a_ ) , a_ , '''''' ) ) ) # This model should also work if `image` is set to None __snake_case : Dict = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = AutoTokenizer.from_pretrained( '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a_ ) __snake_case : int = pipeline( '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a_ , revision='''3dc6de3''' , max_seq_len=50 , ) __snake_case : int = INVOICE_URL __snake_case : Optional[int] = '''What is the invoice number?''' __snake_case : str = dqa_pipeline(image=a_ , question=a_ , top_k=2 ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] , ) __snake_case : List[Any] = dqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ [ {'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, ] ] * 2 , ) __snake_case : Union[str, Any] = list(zip(*apply_tesseract(load_image(a_ ) , a_ , '''''' ) ) ) # This model should also work if `image` is set to None __snake_case : Any = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, {'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16}, 
] , ) @slow @require_torch def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = pipeline( '''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , ) __snake_case : int = INVOICE_URL __snake_case : Optional[Any] = '''What is the invoice number?''' __snake_case : Optional[int] = dqa_pipeline(image=a_ , question=a_ , top_k=2 ) self.assertEqual(nested_simplify(a_ , decimals=4 ) , [{'''answer''': '''us-001'''}] ) @require_tf @unittest.skip('''Document question answering not implemented in TF''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass
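# The usage pattern these tests exercise, as a sketch; the model id is taken from
# the tests above, while "invoice.png" stands in for a local file (OCR additionally
# requires pytesseract):
from transformers import pipeline

dqa_pipeline = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
outputs = dqa_pipeline(image="invoice.png", question="What is the invoice number?", top_k=2)
# -> a list of {"score", "answer", "start", "end"} dicts, as asserted above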
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__) @dataclass class _UpperCAmelCase ( datasets.BuilderConfig ): '''simple docstring''' lowerCamelCase__ =10000 lowerCamelCase__ =None lowerCamelCase__ =None class _UpperCAmelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' lowerCamelCase__ =ParquetConfig def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) __snake_case : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(a_ , (str, list, tuple) ): __snake_case : Union[str, Any] = data_files if isinstance(a_ , a_ ): __snake_case : Union[str, Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : List[Any] = [dl_manager.iter_files(a_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __snake_case : int = [] for split_name, files in data_files.items(): if isinstance(a_ , a_ ): __snake_case : List[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case : int = [dl_manager.iter_files(a_ ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(a_ ): with open(a_ , '''rb''' ) as f: __snake_case : Any = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) ) break splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) ) return splits def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __snake_case : List[Any] = table_cast(a_ , self.info.features.arrow_schema ) return pa_table def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ): with open(a_ , '''rb''' ) as f: __snake_case : int = pq.ParquetFile(a_ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __snake_case : Dict = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"""{file_idx}_{batch_idx}""", self._cast_table(a_ ) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(a_ )}: {e}""" ) raise
"""simple docstring""" from collections.abc import Callable def lowercase ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) ->float: """simple docstring""" __snake_case : float = a __snake_case : float = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('''could not find root in given interval.''' ) else: __snake_case : float = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: __snake_case : List[str] = mid else: __snake_case : str = mid __snake_case : str = start + (end - start) / 2.0 return mid def lowercase ( _snake_case : float ) ->float: """simple docstring""" return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1000)) import doctest doctest.testmod()
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): __snake_case : Dict = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = '''sshleifer/tiny-gpt2''' __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = '''sgugger/tiny-distilbert-classification''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , ) __snake_case : Optional[Any] = TensorFlowBenchmark(a_ ) __snake_case : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Any = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Union[str, Any] = AutoConfig.from_pretrained(a_ ) __snake_case : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = '''sshleifer/tiny-gpt2''' __snake_case : Optional[Any] = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : Dict = TensorFlowBenchmark(a_ , [config] ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = 
TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : int = TensorFlowBenchmark(a_ ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = '''sshleifer/tiny-gpt2''' __snake_case : Dict = AutoConfig.from_pretrained(a_ ) __snake_case : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ , [config] ) __snake_case : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''patrickvonplaten/t5-tiny-random''' __snake_case : Tuple = AutoConfig.from_pretrained(a_ ) __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) __snake_case : List[str] = TensorFlowBenchmark(a_ , configs=[config] ) __snake_case : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = '''sshleifer/tiny-gpt2''' __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a_ , multi_process=a_ , ) __snake_case : Optional[int] = TensorFlowBenchmark(a_ ) __snake_case : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(a_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(a_ , '''env.csv''' ) , multi_process=a_ , ) __snake_case : Union[str, Any] = TensorFlowBenchmark(a_ ) benchmark.run() self.assertTrue(Path(os.path.join(a_ , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , '''env.csv''' ) ).exists() ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(a_ ): self.assertTrue(hasattr(a_ , '''sequential''' ) ) self.assertTrue(hasattr(a_ , '''cumulative''' ) ) self.assertTrue(hasattr(a_ , '''current''' ) ) self.assertTrue(hasattr(a_ , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , '''log.txt''' ) , log_print=a_ , 
trace_memory_line_by_line=a_ , eager_mode=a_ , multi_process=a_ , ) __snake_case : List[Any] = TensorFlowBenchmark(a_ ) __snake_case : Optional[int] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(a_ , '''log.txt''' ) ).exists() )
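# A minimal usage sketch of the benchmark API exercised by the tests above.
# It assumes TensorFlow is installed and the tiny "sshleifer/tiny-gpt2"
# checkpoint (the same one the tests use) can be downloaded.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(benchmark_args).run()
# time_inference_result / memory_inference_result are the nested dicts the
# tests walk over ("bs", "ss", "result") in check_results_dict_not_empty.
print(results.time_inference_result)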
"""simple docstring""" import json import sys def lowercase ( _snake_case : Optional[int] , _snake_case : Optional[int] ) ->List[str]: """simple docstring""" with open(_snake_case , encoding='''utf-8''' ) as f: __snake_case : Optional[int] = json.load(_snake_case ) __snake_case : Optional[int] = ['''<details>''', '''<summary>Show updated benchmarks!</summary>''', ''' '''] for benchmark_name in sorted(_snake_case ): __snake_case : List[str] = results[benchmark_name] __snake_case : List[str] = benchmark_name.split('''/''' )[-1] output_md.append(f"""### Benchmark: {benchmark_file_name}""" ) __snake_case : Optional[int] = '''| metric |''' __snake_case : Any = '''|--------|''' __snake_case : Dict = '''| new / old (diff) |''' for metric_name in sorted(_snake_case ): __snake_case : List[Any] = benchmark_res[metric_name] __snake_case : Any = metric_vals['''new'''] __snake_case : str = metric_vals.get('''old''' , _snake_case ) __snake_case : Tuple = metric_vals.get('''diff''' , _snake_case ) __snake_case : Any = f""" {new_val:f}""" if isinstance(_snake_case , (int, float) ) else '''None''' if old_val is not None: val_str += f""" / {old_val:f}""" if isinstance(_snake_case , (int, float) ) else "None" if dif_val is not None: val_str += f""" ({dif_val:f})""" if isinstance(_snake_case , (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append('''</details>''' ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as f: f.writelines('''\n'''.join(_snake_case ) ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : str = sys.argv[1] SCREAMING_SNAKE_CASE : Dict = sys.argv[2] format_json_to_md(input_json_file, output_md_file)
"""simple docstring""" import logging import os import threading import time try: import warnings except ImportError: SCREAMING_SNAKE_CASE : Tuple = None try: import msvcrt except ImportError: SCREAMING_SNAKE_CASE : List[str] = None try: import fcntl except ImportError: SCREAMING_SNAKE_CASE : Tuple = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: SCREAMING_SNAKE_CASE : List[str] = OSError # Data # ------------------------------------------------ SCREAMING_SNAKE_CASE : List[Any] = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] SCREAMING_SNAKE_CASE : List[Any] = """3.0.12""" SCREAMING_SNAKE_CASE : int = None def lowercase ( ) ->str: """simple docstring""" global _logger __snake_case : Union[str, Any] = _logger or logging.getLogger(__name__ ) return _logger class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[int] = lock_file return None def __str__(self ): '''simple docstring''' __snake_case : Tuple = f"""The file lock '{self.lock_file}' could not be acquired.""" return temp class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ ): '''simple docstring''' __snake_case : Optional[Any] = lock return None def __enter__(self ): '''simple docstring''' return self.lock def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.lock.release() return None class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : List[Any] = max_filename_length if max_filename_length is not None else 2_55 # Hash the filename if it's too long __snake_case : Dict = self.hash_filename_if_too_long(a_ , a_ ) # The path to the lock file. __snake_case : str = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __snake_case : Dict = None # The default timeout value. __snake_case : List[Any] = timeout # We use this lock primarily for the lock counter. __snake_case : Tuple = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __snake_case : Optional[Any] = 0 return None @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._timeout @timeout.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Dict = float(a_ ) return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' raise NotImplementedError() @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self._lock_file_fd is not None def SCREAMING_SNAKE_CASE (self , a_=None , a_=0.05 ): '''simple docstring''' if timeout is None: __snake_case : List[str] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __snake_case : Optional[int] = id(self ) __snake_case : str = self._lock_file __snake_case : Optional[int] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(a_ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __snake_case : Optional[int] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def SCREAMING_SNAKE_CASE (self , a_=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __snake_case : Tuple = id(self ) __snake_case : str = self._lock_file logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __snake_case : Dict = 0 logger().debug(f"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__(self ): '''simple docstring''' self.acquire() return self def __exit__(self , a_ , a_ , a_ ): '''simple docstring''' self.release() return None def __del__(self ): '''simple docstring''' self.release(force=a_ ) return None def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : Any = os.path.basename(a_ ) if len(a_ ) > max_length and max_length > 0: __snake_case : List[Any] = os.path.dirname(a_ ) __snake_case : Any = str(hash(a_ ) ) __snake_case : List[Any] = filename[: max_length - len(a_ ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(a_ , a_ ) else: return path class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) __snake_case : List[str] = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __snake_case : Any = os.open(self._lock_file , a_ ) except OSError: pass else: try: msvcrt.locking(a_ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(a_ ) else: __snake_case : Dict = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Dict = None msvcrt.locking(a_ , msvcrt.LK_UNLCK , 1 ) os.close(a_ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=-1 , a_=None ): '''simple docstring''' __snake_case : Optional[Any] = os.statvfs(os.path.dirname(a_ ) ).f_namemax super().__init__(a_ , timeout=a_ , max_filename_length=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC __snake_case : List[str] = os.open(self._lock_file , a_ ) try: fcntl.flock(a_ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(a_ ) else: __snake_case : Optional[int] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self._lock_file_fd __snake_case : Tuple = None fcntl.flock(a_ , fcntl.LOCK_UN ) os.close(a_ ) return None class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __snake_case : Tuple = os.open(self._lock_file , a_ ) except OSError: pass else: __snake_case : List[Any] = fd return None def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' os.close(self._lock_file_fd ) __snake_case : int = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None SCREAMING_SNAKE_CASE : Dict = None if msvcrt: SCREAMING_SNAKE_CASE : List[Any] = WindowsFileLock elif fcntl: SCREAMING_SNAKE_CASE : List[str] = UnixFileLock else: SCREAMING_SNAKE_CASE : str = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
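# A minimal usage sketch for the lock module above, assuming the public names
# from __all__ ("FileLock", "Timeout") are bound to the classes defined here;
# FileLock resolves to the Windows, Unix, or soft implementation at import
# time. The lock file name "app.lock" is an arbitrary example.
lock = FileLock("app.lock", timeout=5)
try:
    with lock:  # acquires on enter, releases on exit; nesting is counted
        print("lock held, doing exclusive work")
except Timeout:
    print("another process holds app.lock")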
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = { '''task_specific_params''': { '''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_28, '''min_length''': 12, '''num_beams''': 4}, '''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_42, '''min_length''': 56, '''num_beams''': 4}, '''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6}, } } __snake_case : Dict = { '''task_specific_params.summarization.length_penalty''': 1.0, '''task_specific_params.summarization.max_length''': 1_28, '''task_specific_params.summarization.min_length''': 12, '''task_specific_params.summarization.num_beams''': 4, '''task_specific_params.summarization_cnn.length_penalty''': 2.0, '''task_specific_params.summarization_cnn.max_length''': 1_42, '''task_specific_params.summarization_cnn.min_length''': 56, '''task_specific_params.summarization_cnn.num_beams''': 4, '''task_specific_params.summarization_xsum.length_penalty''': 1.0, '''task_specific_params.summarization_xsum.max_length''': 62, '''task_specific_params.summarization_xsum.min_length''': 11, '''task_specific_params.summarization_xsum.num_beams''': 6, } self.assertEqual(flatten_dict(a_ ) , a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(a_ ) , x.transpose() ) ) __snake_case : Dict = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(a_ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = np.random.randn(3 , 4 ) __snake_case : str = torch.tensor(a_ ) self.assertTrue(np.allclose(transpose(a_ ) , transpose(a_ ).numpy() ) ) __snake_case : Dict = np.random.randn(3 , 4 , 5 ) __snake_case : List[Any] = torch.tensor(a_ ) self.assertTrue(np.allclose(transpose(a_ , axes=(1, 2, 0) ) , transpose(a_ , axes=(1, 2, 0) ).numpy() ) ) @require_tf def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = np.random.randn(3 , 4 ) __snake_case : Optional[int] = tf.constant(a_ ) self.assertTrue(np.allclose(transpose(a_ ) , transpose(a_ ).numpy() ) ) __snake_case : int = np.random.randn(3 , 4 , 5 ) __snake_case : List[Any] = tf.constant(a_ ) self.assertTrue(np.allclose(transpose(a_ , axes=(1, 2, 0) ) , transpose(a_ , axes=(1, 2, 0) ).numpy() ) ) @require_flax def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = np.random.randn(3 , 4 ) __snake_case : Tuple = jnp.array(a_ ) self.assertTrue(np.allclose(transpose(a_ ) , np.asarray(transpose(a_ ) ) ) ) __snake_case : Any = np.random.randn(3 , 4 , 5 ) __snake_case : Optional[Any] = jnp.array(a_ ) self.assertTrue(np.allclose(transpose(a_ , axes=(1, 2, 0) ) , np.asarray(transpose(a_ , axes=(1, 2, 0) ) ) ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(a_ , (4, 3) ) , np.reshape(a_ , (4, 3) ) ) ) 
__snake_case : Optional[int] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(a_ , (12, 5) ) , np.reshape(a_ , (12, 5) ) ) ) @require_torch def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = np.random.randn(3 , 4 ) __snake_case : Union[str, Any] = torch.tensor(a_ ) self.assertTrue(np.allclose(reshape(a_ , (4, 3) ) , reshape(a_ , (4, 3) ).numpy() ) ) __snake_case : str = np.random.randn(3 , 4 , 5 ) __snake_case : Optional[Any] = torch.tensor(a_ ) self.assertTrue(np.allclose(reshape(a_ , (12, 5) ) , reshape(a_ , (12, 5) ).numpy() ) ) @require_tf def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = np.random.randn(3 , 4 ) __snake_case : str = tf.constant(a_ ) self.assertTrue(np.allclose(reshape(a_ , (4, 3) ) , reshape(a_ , (4, 3) ).numpy() ) ) __snake_case : Optional[int] = np.random.randn(3 , 4 , 5 ) __snake_case : List[Any] = tf.constant(a_ ) self.assertTrue(np.allclose(reshape(a_ , (12, 5) ) , reshape(a_ , (12, 5) ).numpy() ) ) @require_flax def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = np.random.randn(3 , 4 ) __snake_case : str = jnp.array(a_ ) self.assertTrue(np.allclose(reshape(a_ , (4, 3) ) , np.asarray(reshape(a_ , (4, 3) ) ) ) ) __snake_case : str = np.random.randn(3 , 4 , 5 ) __snake_case : str = jnp.array(a_ ) self.assertTrue(np.allclose(reshape(a_ , (12, 5) ) , np.asarray(reshape(a_ , (12, 5) ) ) ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(a_ ) , np.squeeze(a_ ) ) ) __snake_case : str = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(a_ , axis=2 ) , np.squeeze(a_ , axis=2 ) ) ) @require_torch def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = np.random.randn(1 , 3 , 4 ) __snake_case : List[str] = torch.tensor(a_ ) self.assertTrue(np.allclose(squeeze(a_ ) , squeeze(a_ ).numpy() ) ) __snake_case : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 ) __snake_case : str = torch.tensor(a_ ) self.assertTrue(np.allclose(squeeze(a_ , axis=2 ) , squeeze(a_ , axis=2 ).numpy() ) ) @require_tf def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = np.random.randn(1 , 3 , 4 ) __snake_case : Dict = tf.constant(a_ ) self.assertTrue(np.allclose(squeeze(a_ ) , squeeze(a_ ).numpy() ) ) __snake_case : Dict = np.random.randn(1 , 4 , 1 , 5 ) __snake_case : int = tf.constant(a_ ) self.assertTrue(np.allclose(squeeze(a_ , axis=2 ) , squeeze(a_ , axis=2 ).numpy() ) ) @require_flax def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = np.random.randn(1 , 3 , 4 ) __snake_case : Any = jnp.array(a_ ) self.assertTrue(np.allclose(squeeze(a_ ) , np.asarray(squeeze(a_ ) ) ) ) __snake_case : Optional[int] = np.random.randn(1 , 4 , 1 , 5 ) __snake_case : Tuple = jnp.array(a_ ) self.assertTrue(np.allclose(squeeze(a_ , axis=2 ) , np.asarray(squeeze(a_ , axis=2 ) ) ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(a_ , axis=1 ) , np.expand_dims(a_ , axis=1 ) ) ) @require_torch def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = np.random.randn(3 , 4 ) __snake_case : Optional[int] = torch.tensor(a_ ) self.assertTrue(np.allclose(expand_dims(a_ , axis=1 ) , expand_dims(a_ , axis=1 ).numpy() ) ) @require_tf def SCREAMING_SNAKE_CASE (self ): '''simple 
docstring''' __snake_case : Union[str, Any] = np.random.randn(3 , 4 ) __snake_case : Optional[Any] = tf.constant(a_ ) self.assertTrue(np.allclose(expand_dims(a_ , axis=1 ) , expand_dims(a_ , axis=1 ).numpy() ) ) @require_flax def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = np.random.randn(3 , 4 ) __snake_case : List[str] = jnp.array(a_ ) self.assertTrue(np.allclose(expand_dims(a_ , axis=1 ) , np.asarray(expand_dims(a_ , axis=1 ) ) ) )
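# A small sketch of the framework-agnostic helpers the tests above cover: the
# same transpose/reshape/squeeze/expand_dims calls accept NumPy arrays and,
# when the frameworks are installed, torch/tf/jax tensors.
import numpy as np

from transformers.utils import expand_dims, reshape, squeeze, transpose

x = np.random.randn(1, 3, 4)
print(transpose(x, axes=(2, 1, 0)).shape)  # (4, 3, 1)
print(reshape(x, (3, 4)).shape)            # (3, 4)
print(squeeze(x).shape)                    # (3, 4) -- size-1 axis dropped
print(expand_dims(x, axis=1).shape)        # (1, 1, 3, 4)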
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=24 , a_=2 , a_=6 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=None , a_=10_00 , ): '''simple docstring''' __snake_case : Any = parent __snake_case : int = batch_size __snake_case : Dict = seq_length __snake_case : List[str] = is_training __snake_case : List[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : Union[str, Any] = use_labels __snake_case : str = vocab_size __snake_case : int = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : int = num_attention_heads __snake_case : str = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : int = hidden_dropout_prob __snake_case : Union[str, Any] = attention_probs_dropout_prob __snake_case : List[Any] = max_position_embeddings __snake_case : Any = type_vocab_size __snake_case : Dict = type_sequence_label_size __snake_case : Optional[Any] = initializer_range __snake_case : Union[str, Any] = num_labels __snake_case : Any = scope __snake_case : Any = range_bbox def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : List[str] = bbox[i, j, 3] __snake_case : Any = bbox[i, j, 1] __snake_case : Tuple = t if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : List[str] = bbox[i, j, 2] __snake_case : Union[str, Any] = bbox[i, j, 0] __snake_case : Dict = t __snake_case : Optional[int] = None if self.use_input_mask: __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __snake_case : Dict = None if self.use_token_type_ids: __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : List[str] = None __snake_case : Union[str, Any] = None if self.use_labels: __snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : Any = model(a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ ) __snake_case : str = model(a_ , bbox=a_ , token_type_ids=a_ ) __snake_case : List[str] = model(a_ , bbox=a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[int] = self.num_labels __snake_case : List[str] = LiltForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Tuple = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Optional[Any] = LiltForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model( a_ , bbox=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Dict = config_and_inputs __snake_case : Any = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ =( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =False lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' return True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModelTester(self ) __snake_case : Optional[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Dict = type 
self.model_tester.create_and_check_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = LiltModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @require_torch @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a_ ) __snake_case : Dict = torch.tensor([[1, 2]] , device=a_ ) __snake_case : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a_ ) # forward pass with torch.no_grad(): __snake_case : Union[str, Any] = model(input_ids=a_ , bbox=a_ ) __snake_case : Union[str, Any] = torch.Size([1, 2, 7_68] ) __snake_case : str = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=a_ , ) self.assertTrue(outputs.last_hidden_state.shape , a_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a_ , atol=1E-3 ) )
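# A condensed version of the slow integration test above, assuming the
# "SCUT-DLVCLab/lilt-roberta-en-base" checkpoint can be downloaded. LiLT pairs
# each token id with an (x0, y0, x1, y1) bounding box.
import torch

from transformers import LiltModel

model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
input_ids = torch.tensor([[1, 2]])
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])
with torch.no_grad():
    outputs = model(input_ids=input_ids, bbox=bbox)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 2, 768])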
"""simple docstring""" from itertools import permutations def lowercase ( _snake_case : tuple ) ->bool: """simple docstring""" if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False __snake_case : str = [7, 11, 13, 17] for i, test in enumerate(_snake_case ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase ( _snake_case : int = 10 ) ->int: """simple docstring""" return sum( int(''''''.join(map(_snake_case , _snake_case ) ) ) for num in permutations(range(_snake_case ) ) if is_substring_divisible(_snake_case ) ) if __name__ == "__main__": print(F'{solution() = }')
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ): '''simple docstring''' __snake_case : List[Any] = parent __snake_case : List[Any] = batch_size __snake_case : str = seq_length __snake_case : Any = is_training __snake_case : Any = use_input_mask __snake_case : str = use_token_type_ids __snake_case : Dict = use_labels __snake_case : int = vocab_size __snake_case : Union[str, Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : str = num_attention_heads __snake_case : Optional[int] = intermediate_size __snake_case : str = hidden_act __snake_case : Union[str, Any] = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : str = max_position_embeddings __snake_case : Dict = type_vocab_size __snake_case : List[Any] = type_sequence_label_size __snake_case : Union[str, Any] = initializer_range __snake_case : str = num_labels __snake_case : Dict = num_choices __snake_case : Optional[int] = scope def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Dict = None if self.use_input_mask: __snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Tuple = None __snake_case : List[str] = None __snake_case : Dict = None if self.use_labels: __snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[str] = DistilBertModel(config=a_ ) model.to(a_ ) model.eval() __snake_case : int = model(a_ , a_ ) __snake_case : List[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple 
docstring''' __snake_case : Optional[Any] = DistilBertForMaskedLM(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Tuple = DistilBertForQuestionAnswering(config=a_ ) model.to(a_ ) model.eval() __snake_case : Optional[Any] = model( a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Any = self.num_labels __snake_case : Optional[int] = DistilBertForSequenceClassification(a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Union[str, Any] = self.num_labels __snake_case : Optional[int] = DistilBertForTokenClassification(config=a_ ) model.to(a_ ) model.eval() __snake_case : Dict = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[Any] = self.num_choices __snake_case : Any = DistilBertForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() __snake_case : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : Optional[int] = model( a_ , attention_mask=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs __snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCamelCase__ =( { 'feature-extraction': DistilBertModel, 'fill-mask': DistilBertForMaskedLM, 'question-answering': DistilBertForQuestionAnswering, 'text-classification': DistilBertForSequenceClassification, 'token-classification': DistilBertForTokenClassification, 'zero-shot': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True lowerCamelCase__ =True def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = DistilBertModelTester(self ) __snake_case : List[str] = ConfigTester(self , config_class=a_ , dim=37 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def 
SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ ) @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Tuple = DistilBertModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return __snake_case : List[str] = True __snake_case : Tuple = model_class(config=a_ ) __snake_case : Any = self._prepare_for_class(a_ , a_ ) __snake_case : Dict = torch.jit.trace( a_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a_ , os.path.join(a_ , '''traced_model.pt''' ) ) __snake_case : int = torch.jit.load(os.path.join(a_ , '''traced_model.pt''' ) , map_location=a_ ) loaded(inputs_dict['''input_ids'''].to(a_ ) , inputs_dict['''attention_mask'''].to(a_ ) ) @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __snake_case : List[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __snake_case : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __snake_case : List[Any] = model(a_ , attention_mask=a_ )[0] __snake_case : Tuple = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , a_ ) __snake_case : Optional[int] = torch.tensor( [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
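# A condensed version of the slow integration test above, assuming the
# "distilbert-base-uncased" checkpoint can be downloaded.
import torch

from transformers import DistilBertModel

model = DistilBertModel.from_pretrained("distilbert-base-uncased")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    hidden_states = model(input_ids, attention_mask=attention_mask)[0]
print(hidden_states.shape)  # torch.Size([1, 11, 768])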
"""simple docstring""" import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin SCREAMING_SNAKE_CASE : Union[str, Any] = 1E-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=16 , a_=13 , a_=7 , a_=14 , a_=10 , a_=19 , a_=5 , a_=4 , a_=True , a_=16 , a_=2 , a_=4 , a_=4 , a_="gelu" , a_=0.1 , a_=0.1 , a_=[1, 2, 3, 4, 5] , a_=25 , a_=5 , ): '''simple docstring''' __snake_case : List[str] = d_model __snake_case : str = parent __snake_case : Union[str, Any] = batch_size __snake_case : List[Any] = prediction_length __snake_case : Tuple = context_length __snake_case : Optional[int] = cardinality __snake_case : Tuple = num_time_features __snake_case : Union[str, Any] = lags_sequence __snake_case : Tuple = embedding_dimension __snake_case : int = is_training __snake_case : List[str] = hidden_size __snake_case : Tuple = num_hidden_layers __snake_case : Optional[Any] = num_attention_heads __snake_case : int = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : Optional[Any] = hidden_dropout_prob __snake_case : Union[str, Any] = attention_probs_dropout_prob __snake_case : Union[str, Any] = context_length __snake_case : str = prediction_length + label_length __snake_case : Union[str, Any] = label_length __snake_case : Dict = moving_average __snake_case : Tuple = autocorrelation_factor def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Any = config.context_length + max(config.lags_sequence ) __snake_case : Any = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) __snake_case : Any = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) __snake_case : Union[str, Any] = floats_tensor([self.batch_size, _past_length] ) __snake_case : Optional[int] = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs __snake_case : Optional[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) __snake_case : Any = floats_tensor([self.batch_size, config.prediction_length] ) __snake_case : Dict = { '''past_values''': past_values, '''static_categorical_features''': static_categorical_features, 
'''past_time_features''': past_time_features, '''past_observed_mask''': past_observed_mask, '''future_time_features''': future_time_features, '''future_values''': future_values, } return inputs_dict def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = self.get_config() __snake_case : List[Any] = self.prepare_autoformer_inputs_dict(a_ ) return config, inputs_dict def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = self.prepare_config_and_inputs() return config, inputs_dict def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : int = AutoformerModel(config=a_ ).to(a_ ).eval() __snake_case : Union[str, Any] = model(**a_ ) __snake_case : str = outputs.encoder_last_hidden_state __snake_case : int = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: __snake_case : int = model.get_encoder() encoder.save_pretrained(a_ ) __snake_case : Any = AutoformerEncoder.from_pretrained(a_ ).to(a_ ) __snake_case : List[str] = model.create_network_inputs(**a_ ) __snake_case : Union[str, Any] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) __snake_case : int = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) __snake_case : Optional[int] = encoder(inputs_embeds=a_ )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) __snake_case : List[Any] = ( torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) __snake_case : int = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) __snake_case : Tuple = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) __snake_case : List[str] = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: __snake_case : Dict = model.get_decoder() decoder.save_pretrained(a_ ) __snake_case : List[str] = AutoformerDecoder.from_pretrained(a_ ).to(a_ ) __snake_case : Optional[int] = decoder( trend=a_ , inputs_embeds=a_ , encoder_hidden_states=a_ , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =(AutoformerModel, AutoformerForPrediction) if is_torch_available() else () lowerCamelCase__ =(AutoformerForPrediction,) if is_torch_available() else () lowerCamelCase__ ={'feature-extraction': AutoformerModel} if is_torch_available() else {} lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =False def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = AutoformerModelTester(self ) __snake_case : Dict = ConfigTester(self , config_class=a_ , has_text_modality=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: __snake_case : 
Dict = model_class(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(a_ ) __snake_case : List[str] = model_class.from_pretrained(a_ , output_loading_info=a_ ) self.assertEqual(info['''missing_keys'''] , [] ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*a_ ) @unittest.skip(reason='''Model has no tokens embeddings''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = inspect.signature(getattr(a_ , '''forward''' ) ) # The main input is the name of the argument after `self` __snake_case : Optional[int] = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Dict = model_class(a_ ) __snake_case : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : str = [*signature.parameters.keys()] __snake_case : str = [ '''past_values''', '''past_time_features''', '''past_observed_mask''', '''static_categorical_features''', '''static_real_features''', '''future_values''', '''future_time_features''', ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append('''future_observed_mask''' ) expected_arg_names.extend( [ '''decoder_attention_mask''', '''head_mask''', '''decoder_head_mask''', '''cross_attn_head_mask''', '''encoder_outputs''', '''past_key_values''', '''output_hidden_states''', '''output_attentions''', '''use_cache''', '''return_dict''', ] ) self.assertListEqual(arg_names[: len(a_ )] , a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : List[str] = True __snake_case : Optional[int] = getattr(self.model_tester , '''seq_length''' , a_ ) __snake_case : Optional[int] = getattr(self.model_tester , '''decoder_seq_length''' , a_ ) __snake_case : List[Any] = getattr(self.model_tester , '''encoder_seq_length''' , a_ ) __snake_case : Any = getattr(self.model_tester , '''d_model''' , a_ ) __snake_case : int = getattr(self.model_tester , '''num_attention_heads''' , a_ ) __snake_case : Optional[int] = d_model // num_attention_heads for model_class in self.all_model_classes: __snake_case : List[str] = True __snake_case : List[str] = False __snake_case : Tuple = True __snake_case : List[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): __snake_case : List[str] = model(**self._prepare_for_class(a_ , a_ ) ) __snake_case : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Any = True __snake_case : List[Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): __snake_case : Dict = model(**self._prepare_for_class(a_ , a_ ) ) __snake_case : Union[str, Any] = outputs.encoder_attentions self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , 
[self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) __snake_case : List[str] = len(a_ ) __snake_case : Optional[int] = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(a_ , a_ ) # decoder attentions __snake_case : Optional[Any] = outputs.decoder_attentions self.assertIsInstance(a_ , (list, tuple) ) self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions __snake_case : Optional[int] = outputs.cross_attentions self.assertIsInstance(a_ , (list, tuple) ) self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine __snake_case : Any = True __snake_case : Optional[Any] = True __snake_case : Dict = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): __snake_case : List[Any] = model(**self._prepare_for_class(a_ , a_ ) ) self.assertEqual(out_len + 2 , len(a_ ) ) __snake_case : List[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(a_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' super().test_retain_grad_hidden_states_attentions() def lowercase ( _snake_case : Optional[Any]="train-batch.pt" ) ->Dict: """simple docstring""" __snake_case : Tuple = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=_snake_case , repo_type='''dataset''' ) __snake_case : int = torch.load(_snake_case , map_location=_snake_case ) return batch @require_torch @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(a_ ) __snake_case : Union[str, Any] = prepare_batch() with torch.no_grad(): __snake_case : int = model( past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0] __snake_case : Any = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , a_ ) __snake_case : Optional[Any] = torch.tensor( [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=a_ ) self.assertTrue(torch.allclose(output[0, :3, :3] , a_ , atol=a_ ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(a_ ) __snake_case : Tuple = prepare_batch('''val-batch.pt''' ) with torch.no_grad(): __snake_case : Any = model( past_values=batch['''past_values'''] , 
past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state __snake_case : List[Any] = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , a_ ) __snake_case : Union[str, Any] = torch.tensor( [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=a_ ) self.assertTrue(torch.allclose(output[0, :3, :3] , a_ , atol=a_ ) ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(a_ ) __snake_case : Optional[int] = prepare_batch('''val-batch.pt''' ) with torch.no_grad(): __snake_case : List[str] = model.generate( static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , ) __snake_case : Optional[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , a_ ) __snake_case : Optional[int] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=a_ ) __snake_case : List[str] = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , a_ , rtol=1E-1 ) )
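# A sketch of the forecasting flow the integration tests above exercise,
# assuming the "huggingface/autoformer-tourism-monthly" checkpoint and the
# hf-internal-testing/tourism-monthly-batch validation batch are reachable.
import torch
from huggingface_hub import hf_hub_download

from transformers import AutoformerForPrediction

model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
batch_file = hf_hub_download(
    repo_id="hf-internal-testing/tourism-monthly-batch", filename="val-batch.pt", repo_type="dataset"
)
batch = torch.load(batch_file, map_location="cpu")
with torch.no_grad():
    outputs = model.generate(
        past_values=batch["past_values"],
        past_time_features=batch["past_time_features"],
        past_observed_mask=batch["past_observed_mask"],
        static_categorical_features=batch["static_categorical_features"],
        future_time_features=batch["future_time_features"],
    )
# One point forecast per series: average over the sampled trajectories.
print(outputs.sequences.mean(dim=1).shape)  # (64, prediction_length)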
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase ( _snake_case : str , _snake_case : str , _snake_case : str ) ->List[Any]: """simple docstring""" def get_masked_lm_array(_snake_case : str ): __snake_case : int = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : str = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Any = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_array(_snake_case : str ): __snake_case : List[str] = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Union[str, Any] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_layer_array(_snake_case : int , _snake_case : str ): __snake_case : str = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Optional[int] = tf.train.load_variable(_snake_case , _snake_case ) if "kernel" in name: __snake_case : Optional[Any] = array.transpose() return torch.from_numpy(_snake_case ) def get_encoder_attention_layer_array(_snake_case : int , _snake_case : str , _snake_case : str ): __snake_case : Any = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE""" __snake_case : Dict = tf.train.load_variable(_snake_case , _snake_case ) __snake_case : int = array.reshape(_snake_case ) if "kernel" in name: __snake_case : Optional[int] = array.transpose() return torch.from_numpy(_snake_case ) print(f"""Loading model based on config from {config_path}...""" ) __snake_case : Optional[Any] = BertConfig.from_json_file(_snake_case ) __snake_case : Dict = BertForMaskedLM(_snake_case ) # Layers for layer_index in range(0 , config.num_hidden_layers ): __snake_case : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention __snake_case : BertSelfAttention = layer.attention.self __snake_case : int = get_encoder_attention_layer_array( _snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape ) __snake_case : str = get_encoder_attention_layer_array( _snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape ) __snake_case : List[Any] = get_encoder_attention_layer_array( _snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape ) __snake_case : Union[str, Any] = get_encoder_attention_layer_array( _snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape ) # Self-attention Output __snake_case : BertSelfOutput = layer.attention.output __snake_case : Dict = get_encoder_attention_layer_array( _snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape ) __snake_case : Tuple = get_encoder_attention_layer_array( _snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape ) __snake_case : str = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/gamma''' ) __snake_case : Any = 
get_encoder_layer_array(_snake_case , '''_attention_layer_norm/beta''' ) # Intermediate __snake_case : BertIntermediate = layer.intermediate __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/kernel''' ) __snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/bias''' ) # Output __snake_case : BertOutput = layer.output __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_dense/kernel''' ) __snake_case : Dict = get_encoder_layer_array(_snake_case , '''_output_dense/bias''' ) __snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/gamma''' ) __snake_case : Union[str, Any] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/beta''' ) # Embeddings __snake_case : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' ) __snake_case : str = get_encoder_array('''_type_embedding_layer/embeddings''' ) __snake_case : int = get_encoder_array('''_embedding_norm_layer/gamma''' ) __snake_case : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' ) # LM Head __snake_case : Optional[Any] = model.cls.predictions.transform __snake_case : Dict = get_masked_lm_array('''dense/kernel''' ) __snake_case : Union[str, Any] = get_masked_lm_array('''dense/bias''' ) __snake_case : str = get_masked_lm_array('''layer_norm/gamma''' ) __snake_case : Tuple = get_masked_lm_array('''layer_norm/beta''' ) __snake_case : Tuple = get_masked_lm_array('''embedding_table''' ) # Pooling __snake_case : Optional[Any] = BertPooler(config=_snake_case ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/kernel''' ) __snake_case : BertPooler = get_encoder_array('''_pooler_layer/bias''' ) # Export final model model.save_pretrained(_snake_case ) # Integration test - should load without any errors ;) __snake_case : Dict = BertForMaskedLM.from_pretrained(_snake_case ) print(new_model.eval() ) print('''Model conversion was done sucessfully!''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() parser.add_argument( """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path.""" ) parser.add_argument( """--bert_config_file""", type=str, required=True, help="""The config json file corresponding to the BERT model. This specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", type=str, required=True, help="""Path to the output PyTorch model.""", ) SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
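# Example invocation of the conversion script above (the script file name is
# hypothetical; the three flags are the ones declared in the argparse block):
#
#   python convert_token_dropping_bert_checkpoint.py \
#       --tf_checkpoint_path /path/to/tf_checkpoint \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model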
24
0
"""simple docstring""" def lowercase ( _snake_case : list ) ->list: """simple docstring""" if len(_snake_case ) <= 1: return lst __snake_case : List[str] = 1 while i < len(_snake_case ): if lst[i - 1] <= lst[i]: i += 1 else: __snake_case : Any = lst[i], lst[i - 1] i -= 1 if i == 0: __snake_case : Any = 1 return lst if __name__ == "__main__": SCREAMING_SNAKE_CASE : str = input("""Enter numbers separated by a comma:\n""").strip() SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(""",""")] print(gnome_sort(unsorted))
358
"""simple docstring""" import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_ , a_ = None , a_ = None , a_ = False , **a_ , ): '''simple docstring''' super().__init__(features=a_ , cache_dir=a_ , keep_in_memory=a_ , **a_ ) __snake_case : Union[str, Any] = Sql( cache_dir=a_ , features=a_ , sql=a_ , con=a_ , **a_ , ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = None __snake_case : Dict = None __snake_case : Dict = None __snake_case : List[str] = None self.builder.download_and_prepare( download_config=a_ , download_mode=a_ , verification_mode=a_ , base_path=a_ , ) # Build dataset for splits __snake_case : Any = self.builder.as_dataset( split='''train''' , verification_mode=a_ , in_memory=self.keep_in_memory ) return dataset class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_ , a_ , a_ = None , a_ = None , **a_ , ): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" ) __snake_case : List[str] = dataset __snake_case : Tuple = name __snake_case : Optional[int] = con __snake_case : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __snake_case : Dict = num_proc __snake_case : Dict = to_sql_kwargs def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = self.to_sql_kwargs.pop('''sql''' , a_ ) __snake_case : Union[str, Any] = self.to_sql_kwargs.pop('''con''' , a_ ) __snake_case : Any = self.to_sql_kwargs.pop('''index''' , a_ ) __snake_case : Optional[Any] = self._write(index=a_ , **self.to_sql_kwargs ) return written def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case , __snake_case , __snake_case : Optional[Any] = args __snake_case : List[Any] = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs __snake_case : Dict = query_table( table=self.dataset.data , key=slice(a_ , offset + self.batch_size ) , indices=self.dataset._indices , ) __snake_case : Tuple = batch.to_pandas() __snake_case : str = df.to_sql(self.name , self.con , index=a_ , **a_ ) return num_rows or len(a_ ) def SCREAMING_SNAKE_CASE (self , a_ , **a_ ): '''simple docstring''' __snake_case : int = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: __snake_case , __snake_case : Union[str, Any] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a_ , a_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += num_rows return written
24
0
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Union[str, Any] = { """openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""", } # fmt: off SCREAMING_SNAKE_CASE : int = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377, 1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211, 4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786, 1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791, 1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409, 3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361 ] SCREAMING_SNAKE_CASE : Tuple = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, 7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793, 1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675, 2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865, 4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362 ] class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='whisper' lowerCamelCase__ =['past_key_values'] lowerCamelCase__ ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__(self , a_=5_18_65 , a_=80 , a_=6 , a_=4 , a_=6 , a_=4 , a_=15_36 , a_=15_36 , a_=0.0 , a_=0.0 , a_=5_02_57 , a_=True , a_=True , a_="gelu" , a_=2_56 , a_=0.0 , a_=0.0 , a_=0.0 , a_=0.02 , a_=False , a_=15_00 , a_=4_48 , a_=5_02_56 , a_=5_02_56 , a_=5_02_56 , a_=None , a_=[2_20, 5_02_56] , a_=False , a_=2_56 , a_=False , a_=0.05 , a_=10 , a_=2 , a_=0.0 , a_=10 , a_=0 , a_=7 , **a_ , ): '''simple docstring''' __snake_case : Any = vocab_size __snake_case : Optional[int] = num_mel_bins __snake_case : int = d_model __snake_case : Any = encoder_layers __snake_case : int = encoder_attention_heads __snake_case : List[Any] = decoder_layers __snake_case : Any = decoder_attention_heads __snake_case : Optional[int] = decoder_ffn_dim __snake_case : Optional[Any] = encoder_ffn_dim __snake_case : Tuple = dropout __snake_case : Optional[int] = attention_dropout __snake_case : Optional[int] = activation_dropout __snake_case : List[str] = activation_function __snake_case : Any = init_std __snake_case : Tuple = encoder_layerdrop __snake_case : Dict = decoder_layerdrop __snake_case : Optional[Any] = use_cache __snake_case : Optional[Any] = encoder_layers __snake_case : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True __snake_case : Optional[Any] = max_source_positions __snake_case : int = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. 
__snake_case : List[Any] = classifier_proj_size __snake_case : str = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __snake_case : List[Any] = apply_spec_augment __snake_case : List[Any] = mask_time_prob __snake_case : int = mask_time_length __snake_case : Any = mask_time_min_masks __snake_case : Union[str, Any] = mask_feature_prob __snake_case : Optional[Any] = mask_feature_length __snake_case : List[str] = mask_feature_min_masks __snake_case : Dict = median_filter_width super().__init__( pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , is_encoder_decoder=a_ , decoder_start_token_id=a_ , suppress_tokens=a_ , begin_suppress_tokens=a_ , **a_ , ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = OrderedDict( [ ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}), ] ) if self.use_past: __snake_case : int = {0: '''batch'''} else: __snake_case : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(a_ , direction='''inputs''' ) return common_inputs def SCREAMING_SNAKE_CASE (self , a_ , a_ = -1 , a_ = -1 , a_ = False , a_ = None , a_ = 2_20_50 , a_ = 5.0 , a_ = 2_20 , ): '''simple docstring''' __snake_case : Tuple = OrderedDict() __snake_case : List[Any] = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=a_ , framework=a_ , sampling_rate=a_ , time_duration=a_ , frequency=a_ , ) __snake_case : int = encoder_inputs['''input_features'''].shape[2] __snake_case : Dict = encoder_sequence_length // 2 if self.use_past else seq_length __snake_case : Dict = super().generate_dummy_inputs( preprocessor.tokenizer , a_ , a_ , a_ , a_ ) __snake_case : List[Any] = encoder_inputs.pop('''input_features''' ) __snake_case : str = decoder_inputs.pop('''decoder_input_ids''' ) if "past_key_values" in decoder_inputs: __snake_case : Union[str, Any] = decoder_inputs.pop('''past_key_values''' ) return dummy_inputs @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return 1E-3
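# Minimal sketch (not from the source file): the defaults above give a small
# Whisper architecture; a model built this way is randomly initialized, so
# real use would load a pretrained checkpoint instead.
from transformers import WhisperModel

config = WhisperConfig()
model = WhisperModel(config)
print(config.d_model, config.encoder_layers)  # 256 6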
359
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[int] = { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""", } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='lxmert' lowerCamelCase__ ={} def __init__(self , a_=3_05_22 , a_=7_68 , a_=12 , a_=95_00 , a_=16_00 , a_=4_00 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=9 , a_=5 , a_=5 , a_=20_48 , a_=4 , a_=6.67 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , **a_ , ): '''simple docstring''' __snake_case : Optional[int] = vocab_size __snake_case : List[str] = hidden_size __snake_case : List[Any] = num_attention_heads __snake_case : int = hidden_act __snake_case : int = intermediate_size __snake_case : Any = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : Tuple = max_position_embeddings __snake_case : List[str] = type_vocab_size __snake_case : str = initializer_range __snake_case : Tuple = layer_norm_eps __snake_case : List[Any] = num_qa_labels __snake_case : int = num_object_labels __snake_case : Optional[Any] = num_attr_labels __snake_case : Union[str, Any] = l_layers __snake_case : Optional[int] = x_layers __snake_case : Optional[int] = r_layers __snake_case : Tuple = visual_feat_dim __snake_case : Optional[int] = visual_pos_dim __snake_case : Dict = visual_loss_normalizer __snake_case : str = task_matched __snake_case : Optional[Any] = task_mask_lm __snake_case : List[str] = task_obj_predict __snake_case : Optional[Any] = task_qa __snake_case : Any = visual_obj_loss __snake_case : int = visual_attr_loss __snake_case : List[Any] = visual_feat_loss __snake_case : Optional[Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers} super().__init__(**a_ )
24
0
"""simple docstring""" import argparse from collections import defaultdict def lowercase ( _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Any ) ->Any: """simple docstring""" __snake_case : Optional[Any] = f"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(_snake_case , '''r''' ) as f: __snake_case : Optional[int] = f.readlines() __snake_case : List[str] = f"""class {class_name}(""" __snake_case : List[str] = f"""{4 * " "}def {test_name}(""" __snake_case : Any = f"""{8 * " "}{correct_line.split()[0]}""" __snake_case : int = f"""{16 * " "}{correct_line.split()[0]}""" __snake_case : Any = False __snake_case : Optional[Any] = False __snake_case : Any = False __snake_case : Dict = False __snake_case : Optional[Any] = 0 __snake_case : Dict = 0 __snake_case : Any = [] for line in lines: if line.startswith(_snake_case ): __snake_case : Dict = True elif in_class and line.startswith(_snake_case ): __snake_case : Union[str, Any] = True elif in_class and in_func and (line.startswith(_snake_case ) or line.startswith(_snake_case )): __snake_case : Any = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: __snake_case : List[Any] = True if in_class and in_func and in_line: if ")" not in line: continue else: __snake_case : List[Any] = True if in_class and in_func and in_line and insert_line: new_lines.append(f"""{spaces * " "}{correct_line}""" ) __snake_case : str = False else: new_lines.append(_snake_case ) with open(_snake_case , '''w''' ) as f: for line in new_lines: f.write(_snake_case ) def lowercase ( _snake_case : Dict , _snake_case : Any=None ) ->List[Any]: """simple docstring""" if fail is not None: with open(_snake_case , '''r''' ) as f: __snake_case : Union[str, Any] = {l.strip() for l in f.readlines()} else: __snake_case : List[Any] = None with open(_snake_case , '''r''' ) as f: __snake_case : int = f.readlines() __snake_case : int = defaultdict(_snake_case ) for line in correct_lines: __snake_case : Any = line.split(''';''' ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser() parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""") parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None) SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() main(args.correct_filename, args.fail_filename)
360
"""simple docstring""" def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" __snake_case : Tuple = len(_snake_case ) __snake_case : str = sum(_snake_case ) __snake_case : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __snake_case : Optional[Any] = True for i in range(1 , s + 1 ): __snake_case : int = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __snake_case : Union[str, Any] = dp[i][j - 1] if arr[i - 1] <= j: __snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __snake_case : List[str] = s - 2 * j break return diff
24
0