Dataset schema (rows are serialized below in column order: code, code_codestyle, style_context, style_context_codestyle, label):

| column                  | dtype  | observed range        |
|-------------------------|--------|-----------------------|
| code                    | string | lengths 81 to 54k     |
| code_codestyle          | int64  | 0 to 721              |
| style_context           | string | lengths 91 to 41.9k   |
| style_context_codestyle | int64  | 0 to 699              |
| label                   | int64  | 0 to 1                |
'''simple docstring''' from __future__ import annotations from random import random from typing import Generic, TypeVar lowerCAmelCase : Tuple = TypeVar("""KT""") lowerCAmelCase : Dict = TypeVar("""VT""") class UpperCamelCase__ ( Generic[KT, VT] ): """simple docstring""" def __init__( self , snake_case__ = "root" , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : Tuple = key _lowerCAmelCase : Union[str, Any] = value _lowerCAmelCase : list[Node[KT, VT]] = [] def __repr__( self ): '''simple docstring''' return F'Node({self.key}: {self.value})' @property def a ( self ): '''simple docstring''' return len(self.forward ) class UpperCamelCase__ ( Generic[KT, VT] ): """simple docstring""" def __init__( self , snake_case__ = 0.5 , snake_case__ = 16 ): '''simple docstring''' _lowerCAmelCase : Node[KT, VT] = Node[KT, VT]() _lowerCAmelCase : Optional[Any] = 0 _lowerCAmelCase : int = p _lowerCAmelCase : Tuple = max_level def __str__( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = list(self ) if len(snake_case__ ) == 0: return F'SkipList(level={self.level})' _lowerCAmelCase : List[Any] = max((len(str(snake_case__ ) ) for item in items) , default=4 ) _lowerCAmelCase : Tuple = max(snake_case__ , 4 ) + 4 _lowerCAmelCase : List[Any] = self.head _lowerCAmelCase : str = [] _lowerCAmelCase : Union[str, Any] = node.forward.copy() lines.append(F'[{node.key}]'.ljust(snake_case__ , '-' ) + '* ' * len(snake_case__ ) ) lines.append(' ' * label_size + '| ' * len(snake_case__ ) ) while len(node.forward ) != 0: _lowerCAmelCase : Union[str, Any] = node.forward[0] lines.append( F'[{node.key}]'.ljust(snake_case__ , '-' ) + ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) ) lines.append(' ' * label_size + '| ' * len(snake_case__ ) ) _lowerCAmelCase : Dict = node.forward lines.append('None'.ljust(snake_case__ ) + '* ' * len(snake_case__ ) ) return F'SkipList(level={self.level})\n' + "\n".join(snake_case__ ) def __iter__( self ): '''simple docstring''' _lowerCAmelCase : Any = self.head while len(node.forward ) != 0: yield node.forward[0].key _lowerCAmelCase : str = node.forward[0] def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = 1 while random() < self.p and level < self.max_level: level += 1 return level def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [] _lowerCAmelCase : int = self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: _lowerCAmelCase : int = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(snake_case__ ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : str = self._locate_node(snake_case__ ) if node is not None: for i, update_node in enumerate(snake_case__ ): # Remove or replace all references to removed node. 
if update_node.level > i and update_node.forward[i].key == key: if node.level > i: _lowerCAmelCase : Optional[int] = node.forward[i] else: _lowerCAmelCase : Optional[Any] = update_node.forward[:i] def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self._locate_node(snake_case__ ) if node is not None: _lowerCAmelCase : Union[str, Any] = value else: _lowerCAmelCase : str = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , snake_case__ ): update_vector.append(self.head ) _lowerCAmelCase : List[Any] = level _lowerCAmelCase : Optional[int] = Node(snake_case__ , snake_case__ ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(snake_case__ ) else: _lowerCAmelCase : Any = new_node def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self._locate_node(snake_case__ ) if node is not None: return node.value return None def lowercase (): """simple docstring""" _lowerCAmelCase : Any = SkipList() skip_list.insert('Key1' , 3 ) skip_list.insert('Key2' , 1_2 ) skip_list.insert('Key3' , 4_1 ) skip_list.insert('Key4' , -1_9 ) _lowerCAmelCase : Any = skip_list.head _lowerCAmelCase : List[str] = {} while node.level != 0: _lowerCAmelCase : str = node.forward[0] _lowerCAmelCase : str = node.value assert len(_A ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 1_2 assert all_values["Key3"] == 4_1 assert all_values["Key4"] == -1_9 def lowercase (): """simple docstring""" _lowerCAmelCase : Union[str, Any] = SkipList() skip_list.insert('Key1' , 1_0 ) skip_list.insert('Key1' , 1_2 ) skip_list.insert('Key5' , 7 ) skip_list.insert('Key7' , 1_0 ) skip_list.insert('Key10' , 5 ) skip_list.insert('Key7' , 7 ) skip_list.insert('Key5' , 5 ) skip_list.insert('Key10' , 1_0 ) _lowerCAmelCase : int = skip_list.head _lowerCAmelCase : Tuple = {} while node.level != 0: _lowerCAmelCase : int = node.forward[0] _lowerCAmelCase : Tuple = node.value if len(_A ) != 4: print() assert len(_A ) == 4 assert all_values["Key1"] == 1_2 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 1_0 def lowercase (): """simple docstring""" _lowerCAmelCase : Tuple = SkipList() assert skip_list.find('Some key' ) is None def lowercase (): """simple docstring""" _lowerCAmelCase : int = SkipList() skip_list.insert('Key2' , 2_0 ) assert skip_list.find('Key2' ) == 2_0 skip_list.insert('Some Key' , 1_0 ) skip_list.insert('Key2' , 8 ) skip_list.insert('V' , 1_3 ) assert skip_list.find('Y' ) is None assert skip_list.find('Key2' ) == 8 assert skip_list.find('Some Key' ) == 1_0 assert skip_list.find('V' ) == 1_3 def lowercase (): """simple docstring""" _lowerCAmelCase : Optional[int] = SkipList() skip_list.delete('Some key' ) assert len(skip_list.head.forward ) == 0 def lowercase (): """simple docstring""" _lowerCAmelCase : Any = SkipList() skip_list.insert('Key1' , 1_2 ) skip_list.insert('V' , 1_3 ) skip_list.insert('X' , 1_4 ) skip_list.insert('Key2' , 1_5 ) skip_list.delete('V' ) skip_list.delete('Key2' ) assert skip_list.find('V' ) is None assert skip_list.find('Key2' ) is None def lowercase (): """simple docstring""" _lowerCAmelCase : str = SkipList() skip_list.insert('Key1' , 1_2 ) skip_list.insert('V' , 1_3 ) 
skip_list.insert('X' , 1_4 ) skip_list.insert('Key2' , 1_5 ) skip_list.delete('V' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) == 1_4 assert skip_list.find('Key1' ) == 1_2 assert skip_list.find('Key2' ) == 1_5 skip_list.delete('X' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) is None assert skip_list.find('Key1' ) == 1_2 assert skip_list.find('Key2' ) == 1_5 skip_list.delete('Key1' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) is None assert skip_list.find('Key1' ) is None assert skip_list.find('Key2' ) == 1_5 skip_list.delete('Key2' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) is None assert skip_list.find('Key1' ) is None assert skip_list.find('Key2' ) is None def lowercase (): """simple docstring""" _lowerCAmelCase : Optional[int] = SkipList() skip_list.insert('Key1' , 1_2 ) skip_list.insert('V' , 1_3 ) skip_list.insert('X' , 1_4_2 ) skip_list.insert('Key2' , 1_5 ) skip_list.delete('X' ) def traverse_keys(_A ): yield node.key for forward_node in node.forward: yield from traverse_keys(_A ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def lowercase (): """simple docstring""" def is_sorted(_A ): return all(next_item >= item for item, next_item in zip(_A , lst[1:] ) ) _lowerCAmelCase : List[str] = SkipList() for i in range(1_0 ): skip_list.insert(_A , _A ) assert is_sorted(list(_A ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(_A ) ) skip_list.insert(-1_2 , -1_2 ) skip_list.insert(7_7 , 7_7 ) assert is_sorted(list(_A ) ) def lowercase (): """simple docstring""" for _ in range(1_0_0 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def lowercase (): """simple docstring""" _lowerCAmelCase : Optional[int] = SkipList() skip_list.insert(2 , '2' ) skip_list.insert(4 , '4' ) skip_list.insert(6 , '4' ) skip_list.insert(4 , '5' ) skip_list.insert(8 , '4' ) skip_list.insert(9 , '4' ) skip_list.delete(4 ) print(_A ) if __name__ == "__main__": import doctest doctest.testmod() main()
630
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow

if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])

        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
630
1
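The skip-list sample in the `code` cell of the row above was left with its style-transformed identifiers (`UpperCamelCase__`, `_lowerCAmelCase`, `snake_case__`), so names such as `SkipList` that its test functions call are undefined. For orientation, here is a minimal sketch of the same structure with readable names; `Node`, `SkipList`, `insert`, and `find` are illustrative choices, not the cell's exact API, and unlike the sample this sketch inserts duplicate keys instead of updating in place:

```python
from random import random


class Node:
    def __init__(self, key=None, value=None):
        self.key = key
        self.value = value
        self.forward = []  # forward[i] is the successor at level i

    @property
    def level(self):
        return len(self.forward)


class SkipList:
    def __init__(self, p=0.5, max_level=16):
        self.head = Node()  # sentinel head, holds no key
        self.level = 0      # highest level currently in use
        self.p = p
        self.max_level = max_level

    def random_level(self):
        # Flip a biased coin until it fails, capped at max_level.
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _descend(self, key):
        # Per level, record the rightmost node whose key is smaller than `key`.
        update = [self.head] * self.max_level
        node = self.head
        for i in reversed(range(self.level)):
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            update[i] = node
        return update

    def insert(self, key, value):
        update = self._descend(key)
        level = self.random_level()
        self.level = max(self.level, level)
        new_node = Node(key, value)
        for i in range(level):
            if update[i].level > i:
                # Splice into the existing level-i list.
                new_node.forward.append(update[i].forward[i])
                update[i].forward[i] = new_node
            else:
                # First node ever linked at level i.
                update[i].forward.append(new_node)

    def find(self, key):
        node = self._descend(key)[0]
        if node.level > 0 and node.forward[0].key == key:
            return node.forward[0].value
        return None
```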
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
630
def pancake_sort(arr):
    """Sort a list by repeatedly flipping prefixes (pancake sort)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first cur elements
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
630
1
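A short worked example for the rotation helpers reconstructed above (it assumes those functions are in scope; the rotation identities follow directly from the composition of transpose and row/column reversal):

```python
# 90° counterclockwise = transpose, then reverse the row order.
# 180° = reverse the column order, then the row order.
m = [[1, 2], [3, 4]]
assert transpose(m) == [[1, 3], [2, 4]]    # mirror across the main diagonal
assert rotate_90(m) == [[2, 4], [1, 3]]
assert rotate_180(m) == [[4, 3], [2, 1]]
```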
'''simple docstring''' import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = MobileBertTokenizer __magic_name__ = MobileBertTokenizerFast __magic_name__ = True __magic_name__ = True __magic_name__ = filter_non_english __magic_name__ = "google/mobilebert-uncased" def a ( self ): '''simple docstring''' super().setUp() _lowerCAmelCase : Optional[int] = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] _lowerCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) _lowerCAmelCase : Union[str, Any] = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = 'UNwant\u00E9d,running' _lowerCAmelCase : List[Any] = 'unwanted, running' return input_text, output_text def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = self.tokenizer_class(self.vocab_file ) _lowerCAmelCase : int = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(snake_case__ , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [9, 6, 7, 12, 10, 11] ) def a ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return _lowerCAmelCase : int = self.get_tokenizer() _lowerCAmelCase : List[Any] = self.get_rust_tokenizer() _lowerCAmelCase : Any = 'UNwant\u00E9d,running' _lowerCAmelCase : List[Any] = tokenizer.tokenize(snake_case__ ) _lowerCAmelCase : Tuple = rust_tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) _lowerCAmelCase : Tuple = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) _lowerCAmelCase : List[str] = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) _lowerCAmelCase : int = self.get_rust_tokenizer() _lowerCAmelCase : List[Any] = tokenizer.encode(snake_case__ ) _lowerCAmelCase : Optional[Any] = rust_tokenizer.encode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) # With lower casing _lowerCAmelCase : int = self.get_tokenizer(do_lower_case=snake_case__ ) _lowerCAmelCase : Tuple = self.get_rust_tokenizer(do_lower_case=snake_case__ ) _lowerCAmelCase : List[str] = 'UNwant\u00E9d,running' _lowerCAmelCase : Dict = tokenizer.tokenize(snake_case__ ) _lowerCAmelCase : int = rust_tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) _lowerCAmelCase : Optional[int] = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) _lowerCAmelCase : Tuple = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) _lowerCAmelCase : Any = self.get_rust_tokenizer() _lowerCAmelCase : Union[str, Any] = tokenizer.encode(snake_case__ ) 
_lowerCAmelCase : Any = rust_tokenizer.encode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = BasicTokenizer(do_lower_case=snake_case__ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = BasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = BasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = BasicTokenizer(do_lower_case=snake_case__ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = BasicTokenizer(do_lower_case=snake_case__ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = BasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = BasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = BasicTokenizer(do_lower_case=snake_case__ , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? 
[UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] _lowerCAmelCase : List[str] = {} for i, token in enumerate(snake_case__ ): _lowerCAmelCase : List[Any] = i _lowerCAmelCase : Tuple = WordpieceTokenizer(vocab=snake_case__ , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) def a ( self ): '''simple docstring''' self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def a ( self ): '''simple docstring''' self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def a ( self ): '''simple docstring''' self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.get_tokenizer() _lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(snake_case__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) self.assertListEqual( [rust_tokenizer.tokenize(snake_case__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.tokenizer_class.from_pretrained('google/mobilebert-uncased' ) _lowerCAmelCase : List[str] = tokenizer.encode('sequence builders' , add_special_tokens=snake_case__ ) _lowerCAmelCase : List[str] = tokenizer.encode('multi-sequence build' , add_special_tokens=snake_case__ ) _lowerCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ ) _lowerCAmelCase : int = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def a ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _lowerCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Union[str, Any] = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.' 
_lowerCAmelCase : str = tokenizer_r.encode_plus( snake_case__ , return_attention_mask=snake_case__ , return_token_type_ids=snake_case__ , return_offsets_mapping=snake_case__ , add_special_tokens=snake_case__ , ) _lowerCAmelCase : Dict = tokenizer_r.do_lower_case if hasattr(snake_case__ , 'do_lower_case' ) else False _lowerCAmelCase : Union[str, Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), ((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = ['的', '人', '有'] _lowerCAmelCase : str = ''.join(snake_case__ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _lowerCAmelCase : Dict = True _lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Union[str, Any] = tokenizer_p.encode(snake_case__ , add_special_tokens=snake_case__ ) _lowerCAmelCase : Optional[int] = tokenizer_r.encode(snake_case__ , add_special_tokens=snake_case__ ) _lowerCAmelCase : Optional[int] = tokenizer_r.convert_ids_to_tokens(snake_case__ ) _lowerCAmelCase : str = tokenizer_p.convert_ids_to_tokens(snake_case__ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(snake_case__ , snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) _lowerCAmelCase : Optional[int] = False _lowerCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : List[Any] = tokenizer_r.encode(snake_case__ , add_special_tokens=snake_case__ ) _lowerCAmelCase : Any = tokenizer_p.encode(snake_case__ , add_special_tokens=snake_case__ ) _lowerCAmelCase : Tuple = tokenizer_r.convert_ids_to_tokens(snake_case__ ) _lowerCAmelCase : Optional[int] = tokenizer_p.convert_ids_to_tokens(snake_case__ ) # it is expected that only the first Chinese character is not preceded by "##". _lowerCAmelCase : Any = [ F'##{token}' if idx != 0 else token for idx, token in enumerate(snake_case__ ) ] self.assertListEqual(snake_case__ , snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ )
630
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : str = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "gptj" __magic_name__ = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , snake_case__=5_0400 , snake_case__=2048 , snake_case__=4096 , snake_case__=28 , snake_case__=16 , snake_case__=64 , snake_case__=None , snake_case__="gelu_new" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1E-5 , snake_case__=0.02 , snake_case__=True , snake_case__=5_0256 , snake_case__=5_0256 , snake_case__=False , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : int = vocab_size _lowerCAmelCase : Optional[int] = n_positions _lowerCAmelCase : Optional[int] = n_embd _lowerCAmelCase : Optional[int] = n_layer _lowerCAmelCase : str = n_head _lowerCAmelCase : Tuple = n_inner _lowerCAmelCase : Tuple = rotary_dim _lowerCAmelCase : Optional[int] = activation_function _lowerCAmelCase : Any = resid_pdrop _lowerCAmelCase : List[str] = embd_pdrop _lowerCAmelCase : int = attn_pdrop _lowerCAmelCase : Any = layer_norm_epsilon _lowerCAmelCase : Optional[int] = initializer_range _lowerCAmelCase : List[str] = use_cache _lowerCAmelCase : Dict = bos_token_id _lowerCAmelCase : Any = eos_token_id super().__init__( bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = "default" , snake_case__ = None , snake_case__ = False , ): '''simple docstring''' super().__init__(snake_case__ , task=snake_case__ , patching_specs=snake_case__ , use_past=snake_case__ ) if not getattr(self._config , 'pad_token_id' , snake_case__ ): # TODO: how to do that better? 
_lowerCAmelCase : Any = 0 @property def a ( self ): '''simple docstring''' _lowerCAmelCase : str = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(snake_case__ , direction='inputs' ) _lowerCAmelCase : int = {0: 'batch', 1: 'past_sequence + sequence'} else: _lowerCAmelCase : int = {0: 'batch', 1: 'sequence'} return common_inputs @property def a ( self ): '''simple docstring''' return self._config.n_layer @property def a ( self ): '''simple docstring''' return self._config.n_head def a ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = super(snake_case__ , self ).generate_dummy_inputs( snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ ) # We need to order the input in the way they appears in the forward() _lowerCAmelCase : Any = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = common_inputs['input_ids'].shape # Not using the same length for past_key_values _lowerCAmelCase : Any = seqlen + 2 _lowerCAmelCase : Optional[int] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _lowerCAmelCase : Tuple = [ (torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers ) ] _lowerCAmelCase : Tuple = common_inputs['attention_mask'] if self.use_past: _lowerCAmelCase : Any = ordered_inputs['attention_mask'].dtype _lowerCAmelCase : Union[str, Any] = torch.cat( [ordered_inputs['attention_mask'], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 ) return ordered_inputs @property def a ( self ): '''simple docstring''' return 13
630
1
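The MobileBERT tokenizer tests in the row above exercise `BasicTokenizer` and `WordpieceTokenizer`, whose core is greedy longest-match-first subword splitting. A minimal sketch of that algorithm follows; it is simplified (the real `WordpieceTokenizer` also enforces a maximum word length and runs after basic tokenization), but it reproduces the behavior the tests assert on:

```python
def wordpiece_tokenize(text, vocab, unk_token="[UNK]"):
    """Greedy longest-match-first WordPiece over whitespace-split words."""
    output = []
    for word in text.split():
        start = 0
        sub_tokens = []
        while start < len(word):
            end = len(word)
            cur = None
            # Find the longest vocab entry matching word[start:end];
            # continuation pieces carry a "##" prefix.
            while start < end:
                piece = word[start:end]
                if start > 0:
                    piece = "##" + piece
                if piece in vocab:
                    cur = piece
                    break
                end -= 1
            if cur is None:
                sub_tokens = [unk_token]  # no match: the whole word becomes [UNK]
                break
            sub_tokens.append(cur)
            start = end
        output.extend(sub_tokens)
    return output


vocab = {"[UNK]", "un", "##want", "##ed", "runn", "##ing"}
assert wordpiece_tokenize("unwanted running", vocab) == ["un", "##want", "##ed", "runn", "##ing"]
assert wordpiece_tokenize("unwantedX running", vocab) == ["[UNK]", "runn", "##ing"]
```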
'''simple docstring''' import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = WavaVecaPhonemeCTCTokenizer __magic_name__ = False def a ( self ): '''simple docstring''' super().setUp() _lowerCAmelCase : List[str] = ( '<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː ' 'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː ' 'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 ' 'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ ' 'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ ' 'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ ' 'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ ' 'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ ' 'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ ' 'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ ' 'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ ' 'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ ' 'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4' ).split(' ' ) _lowerCAmelCase : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) _lowerCAmelCase : Tuple = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'} _lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(snake_case__ ) + '\n' ) def a ( self , snake_case__ , snake_case__=False , snake_case__=20 , snake_case__=5 ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=snake_case__ )) for i in range(len(snake_case__ ) )] _lowerCAmelCase : Union[str, Any] = list(filter(lambda snake_case__ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=snake_case__ ) , snake_case__ ) ) if max_length is not None and len(snake_case__ ) > max_length: _lowerCAmelCase : Any = toks[:max_length] if min_length is not None and len(snake_case__ ) < min_length and len(snake_case__ ) > 0: while len(snake_case__ ) < min_length: _lowerCAmelCase : List[Any] = toks + toks # toks_str = [t[1] for t in toks] _lowerCAmelCase : Optional[Any] = [t[0] for t in toks] # Ensure consistency _lowerCAmelCase : Optional[Any] = tokenizer.decode(snake_case__ , clean_up_tokenization_spaces=snake_case__ ) if " " not in output_txt and len(snake_case__ ) > 1: _lowerCAmelCase : List[Any] = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=snake_case__ ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=snake_case__ ) ) if 
with_prefix_space: _lowerCAmelCase : int = ' ' + output_txt _lowerCAmelCase : Tuple = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) return output_txt, output_ids def a ( self , **snake_case__ ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) # check adding a single token tokenizer.add_tokens('xxx' ) _lowerCAmelCase : Optional[int] = tokenizer('m xxx ɪ' , do_phonemize=snake_case__ ).input_ids self.assertEqual(snake_case__ , [13, 392, 17] ) # xxx should be last token tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] ) _lowerCAmelCase : Optional[Any] = tokenizer('m aaa ɪ ccc' , do_phonemize=snake_case__ ).input_ids self.assertEqual(snake_case__ , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa _lowerCAmelCase : List[str] = tokenizer('maɪ c' , do_phonemize=snake_case__ ).input_ids self.assertEqual(snake_case__ , [3, 200] ) # mai should be <unk> (=3) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) _lowerCAmelCase : int = 'Hello how are you' _lowerCAmelCase : List[str] = tokenizer.phonemize(snake_case__ , phonemizer_lang='en-us' ) self.assertEqual(snake_case__ , 'h ə l oʊ h aʊ ɑːɹ j uː' ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) _lowerCAmelCase : Optional[int] = 'Hello how are you' _lowerCAmelCase : List[Any] = tokenizer.phonemize(snake_case__ , phonemizer_lang='en-us' ) self.assertEqual(tokenizer(snake_case__ ).input_ids , tokenizer(snake_case__ , do_phonemize=snake_case__ ).input_ids ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) _lowerCAmelCase : Optional[Any] = 'Hello how are you' _lowerCAmelCase : Tuple = tokenizer.phonemize(snake_case__ , phonemizer_lang='en-us' ) _lowerCAmelCase : List[str] = tokenizer.decode(tokenizer(snake_case__ ).input_ids ) self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) _lowerCAmelCase : int = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] _lowerCAmelCase : Any = tokenizer.decode(sample_ids[0] ) _lowerCAmelCase : str = tokenizer.batch_decode(snake_case__ ) self.assertEqual(snake_case__ , batch_tokens[0] ) self.assertEqual(snake_case__ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' ) tokenizer.add_tokens('|' ) _lowerCAmelCase : Optional[int] = 'Hello how are you' _lowerCAmelCase : Tuple = tokenizer.phonemize(snake_case__ , phonemizer_lang='en-us' ) self.assertEqual(snake_case__ , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' ) tokenizer.add_tokens('|' ) _lowerCAmelCase : Union[str, Any] = 'Hello how are you' _lowerCAmelCase : Union[str, Any] = tokenizer.phonemize(snake_case__ , 
phonemizer_lang='en-us' ) self.assertEqual(tokenizer(snake_case__ ).input_ids , tokenizer(snake_case__ , do_phonemize=snake_case__ ).input_ids ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' ) tokenizer.add_tokens('|' ) # fmt: off _lowerCAmelCase : int = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter _lowerCAmelCase : int = tokenizer.decode(sample_ids[0] ) _lowerCAmelCase : Dict = tokenizer.batch_decode(snake_case__ ) self.assertEqual(snake_case__ , batch_tokens[0] ) self.assertEqual(snake_case__ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] ) # decode with no word_del_token filter _lowerCAmelCase : Union[str, Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=snake_case__ ) _lowerCAmelCase : Tuple = tokenizer.batch_decode(snake_case__ , filter_word_delimiter_token=snake_case__ ) self.assertEqual(snake_case__ , batch_tokens[0] ) self.assertEqual(snake_case__ , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' ) tokenizer.add_tokens('|' ) _lowerCAmelCase : List[str] = 'Hello how are you' _lowerCAmelCase : str = tokenizer.phonemize(snake_case__ , phonemizer_lang='en-us' ) _lowerCAmelCase : int = tokenizer.decode(tokenizer(snake_case__ ).input_ids , filter_word_delimiter_token=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' ) tokenizer.add_tokens('|' ) _lowerCAmelCase : Union[str, Any] = 'Hello how are you' _lowerCAmelCase : Union[str, Any] = tokenizer.phonemize(snake_case__ , phonemizer_lang='en-us' ) _lowerCAmelCase : List[Any] = tokenizer.decode(tokenizer(snake_case__ ).input_ids , filter_word_delimiter_token=snake_case__ ) self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=snake_case__ ) _lowerCAmelCase : Any = 'Hello how are you' _lowerCAmelCase : Optional[int] = tokenizer(snake_case__ , phonemizer_lang='en-us' ).input_ids _lowerCAmelCase : Union[str, Any] = tokenizer(snake_case__ , phonemizer_lang='fr-fr' ).input_ids self.assertNotEqual(snake_case__ , snake_case__ ) _lowerCAmelCase : List[Any] = tokenizer.decode(snake_case__ ) _lowerCAmelCase : str = tokenizer.decode(snake_case__ ) self.assertEqual(snake_case__ , 'h ə l oʊ h aʊ ɑːɹ j uː' ) self.assertEqual(snake_case__ , 'ɛ l o h aʊ a ʁ j u' ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) _lowerCAmelCase : int = 'Hello how Are you' _lowerCAmelCase : List[Any] = 'hello how are you' _lowerCAmelCase : Optional[Any] = tokenizer(snake_case__ ).input_ids _lowerCAmelCase : Optional[int] = tokenizer(snake_case__ ).input_ids self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : 
Optional[Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) tokenizer.add_tokens(['!', '?'] ) tokenizer.add_special_tokens({'cls_token': '$$$'} ) # fmt: off _lowerCAmelCase : Union[str, Any] = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394], ] # fmt: on _lowerCAmelCase : int = tokenizer.batch_decode(snake_case__ ) self.assertEqual(snake_case__ , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] ) @staticmethod def a ( snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = [d[key] for d in offsets] return retrieved_list def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.get_tokenizer(word_delimiter_token='|' ) tokenizer.add_tokens('|' ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" _lowerCAmelCase : Union[str, Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on _lowerCAmelCase : Tuple = tokenizer.decode(snake_case__ , output_char_offsets=snake_case__ , filter_word_delimiter_token=snake_case__ ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue('text' in outputs ) self.assertTrue('char_offsets' in outputs ) self.assertTrue(isinstance(snake_case__ , snake_case__ ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.get_tokenizer(word_delimiter_token='|' ) def check_list_tuples_equal(snake_case__ , snake_case__ ): self.assertTrue(isinstance(snake_case__ , snake_case__ ) ) self.assertTrue(isinstance(outputs_list[0] , snake_case__ ) ) # transform list to ModelOutput _lowerCAmelCase : int = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] ) def recursive_check(snake_case__ , snake_case__ ): if isinstance(snake_case__ , snake_case__ ): [recursive_check(snake_case__ , snake_case__ ) for la, la in zip(snake_case__ , snake_case__ )] self.assertEqual(snake_case__ , snake_case__ ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] ) # fmt: off _lowerCAmelCase : str = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. 
All we will check now is # the output type is correct and the output is identical to `decode` # char _lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(snake_case__ , output_char_offsets=snake_case__ ) _lowerCAmelCase : Dict = [tokenizer.decode(snake_case__ , output_char_offsets=snake_case__ ) for ids in sample_ids] check_list_tuples_equal(snake_case__ , snake_case__ ) @unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' ) def a ( self ): '''simple docstring''' pass @unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' ) def a ( self ): '''simple docstring''' pass @unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency' ) def a ( self ): '''simple docstring''' pass @unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' ) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = self.get_tokenizers(do_lower_case=snake_case__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): _lowerCAmelCase : Any = tokenizer.vocab_size _lowerCAmelCase : List[Any] = len(snake_case__ ) self.assertNotEqual(snake_case__ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) _lowerCAmelCase : Optional[Any] = ['aaaaa bbbbbb', 'cccccccccdddddddd'] _lowerCAmelCase : Optional[Any] = tokenizer.add_tokens(snake_case__ ) _lowerCAmelCase : Optional[Any] = tokenizer.vocab_size _lowerCAmelCase : Optional[int] = len(snake_case__ ) self.assertNotEqual(snake_case__ , 0 ) self.assertEqual(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , len(snake_case__ ) ) self.assertEqual(snake_case__ , all_size + len(snake_case__ ) ) _lowerCAmelCase : Optional[Any] = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=snake_case__ ) self.assertGreaterEqual(len(snake_case__ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) _lowerCAmelCase : Dict = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'} _lowerCAmelCase : Optional[Any] = tokenizer.add_special_tokens(snake_case__ ) _lowerCAmelCase : Optional[Any] = tokenizer.vocab_size _lowerCAmelCase : Union[str, Any] = len(snake_case__ ) self.assertNotEqual(snake_case__ , 0 ) self.assertEqual(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , len(snake_case__ ) ) self.assertEqual(snake_case__ , all_size_a + len(snake_case__ ) ) _lowerCAmelCase : Tuple = tokenizer.encode( '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=snake_case__ ) self.assertGreaterEqual(len(snake_case__ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' ) def a ( self ): '''simple docstring''' pass @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' 
) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.get_tokenizers(fast=snake_case__ , do_lower_case=snake_case__ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): _lowerCAmelCase : List[Any] = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't'] _lowerCAmelCase : Dict = tokenizer.convert_tokens_to_string(snake_case__ ) self.assertIsInstance(output['text'] , snake_case__ )
630
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
630
1
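The X-CLIP `__init__.py` above (and the Pix2Struct, CTRL, and TrOCR inits further down) all follow transformers' lazy-import pattern: exported names are declared in `_import_structure`, and the submodule that defines a name is only imported on first attribute access. A minimal sketch of the idea (not transformers' actual `_LazyModule`, which handles more cases) might look like this:

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Minimal sketch: import submodules lazily, on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # Import the defining submodule only now; requires __name__ to be a
        # real package so the relative import can resolve.
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value
```

In a package `__init__.py` this is typically installed via `sys.modules[__name__] = LazyModule(__name__, _import_structure)`, exactly as in the inits above.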
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCAmelCase : List[Any] = { """configuration_pix2struct""": [ """PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Pix2StructConfig""", """Pix2StructTextConfig""", """Pix2StructVisionConfig""", ], """processing_pix2struct""": ["""Pix2StructProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : int = ["""Pix2StructImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Tuple = [ """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Pix2StructPreTrainedModel""", """Pix2StructForConditionalGeneration""", """Pix2StructVisionModel""", """Pix2StructTextModel""", ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys lowerCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
630
import math
from datetime import datetime, timedelta


def gauss_easter(year):
    """Calculate the Gregorian Easter date for a given year (Gauss's algorithm)."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
630
1
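A quick worked check of `gauss_easter` above for 2023 (Easter Sunday 2023 was April 9):

```python
# year = 2023:
#   metonic_cycle   = 2023 % 19 = 9
#   julian_leap_year = 2023 % 4 = 3,  non_leap_year = 2023 % 7 = 0
#   leap_day_inhibits = 20, lunar_orbit_correction = 6, leap_day_reinstall_number = 5.0
#   secular_moon_shift      = (15 - 6 + 20 - 5.0) % 30 = 24.0
#   century_starting_point  = (4 + 20 - 5.0) % 7 = 5.0
#   days_to_add             = (19 * 9 + 24.0) % 30 = 15.0
#   days_from_phm_to_sunday = (2*3 + 4*0 + 6*15.0 + 5.0) % 7 = 3.0
#   result: March 22 + 15 + 3 days = April 9
```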
'''simple docstring''' import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase : Any = logging.get_logger(__name__) lowerCAmelCase : Any = {"""vocab_file""": """vocab.json"""} lowerCAmelCase : Optional[int] = { """vocab_file""": { """mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""", } } lowerCAmelCase : Dict = {"""mgp-str""": 27} class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , snake_case__ , snake_case__="[GO]" , snake_case__="[GO]" , snake_case__="[s]" , snake_case__="[GO]" , **snake_case__ ): '''simple docstring''' super().__init__( unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , **snake_case__ , ) with open(snake_case__ , encoding='utf-8' ) as vocab_handle: _lowerCAmelCase : Union[str, Any] = json.load(snake_case__ ) _lowerCAmelCase : int = {v: k for k, v in self.vocab.items()} @property def a ( self ): '''simple docstring''' return len(self.vocab ) def a ( self ): '''simple docstring''' return dict(self.vocab , **self.added_tokens_encoder ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Tuple = [] for s in text: char_tokens.extend(snake_case__ ) return char_tokens def a ( self , snake_case__ ): '''simple docstring''' return self.vocab.get(snake_case__ , self.vocab.get(self.unk_token ) ) def a ( self , snake_case__ ): '''simple docstring''' return self.decoder.get(snake_case__ ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error('Vocabulary path ({}) should be a directory'.format(snake_case__ ) ) return _lowerCAmelCase : List[Any] = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) with open(snake_case__ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + '\n' ) return (vocab_file,)
630
import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")


if __name__ == "__main__":
    unittest.main()
630
1
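The tests above target a greedy (fractional) knapsack `calc_profit` from `knapsack.greedy_knapsack`, which is not shown in this dump. The sketch below is an assumption-laden stand-in that mirrors the error messages and the first test's expected result; the real module may differ in details:

```python
def calc_profit(profit, weight, max_weight):
    """Greedy fractional knapsack sketch; weights are assumed positive."""
    if len(profit) != len(weight):
        raise IndexError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # Take items in decreasing profit/weight ratio; split the last one.
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    limit = max_weight
    gain = 0.0
    for p, w in items:
        if w <= limit:
            limit -= w
            gain += p
        else:
            gain += p * (limit / w)
            break
    return gain


# All six items in the first test have ratio 5 and weigh 42 in total,
# so everything fits under max_weight = 100 and the profit is 210.
assert calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210
```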
class PrefixSum:
    def __init__(self, array):
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end):
        """Sum of array[start:end + 1] in O(1) via precomputed prefix sums."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum):
        """Whether some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
630
def method_1(boundary, steps):
    """Extended (composite) trapezoidal rule over [boundary[0], boundary[1]]."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
630
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCAmelCase : Optional[int] = { """configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""], """tokenization_ctrl""": ["""CTRLTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[str] = [ """CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""", """CTRLForSequenceClassification""", """CTRLLMHeadModel""", """CTRLModel""", """CTRLPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[Any] = [ """TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFCTRLForSequenceClassification""", """TFCTRLLMHeadModel""", """TFCTRLModel""", """TFCTRLPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys lowerCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
630
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) lowerCAmelCase : int = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys lowerCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
630
1
'''simple docstring''' import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = (UnCLIPScheduler,) def a ( self , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = { 'num_train_timesteps': 1000, 'variance_type': 'fixed_small_log', 'clip_sample': True, 'clip_sample_range': 1.0, 'prediction_type': 'epsilon', } config.update(**snake_case__ ) return config def a ( self ): '''simple docstring''' for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=snake_case__ ) def a ( self ): '''simple docstring''' for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=snake_case__ ) def a ( self ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=snake_case__ ) def a ( self ): '''simple docstring''' for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=snake_case__ ) def a ( self ): '''simple docstring''' for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=snake_case__ ) def a ( self ): '''simple docstring''' for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=snake_case__ , prev_timestep=snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = self.scheduler_classes[0] _lowerCAmelCase : List[Any] = self.get_scheduler_config(variance_type='fixed_small_log' ) _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5 def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.scheduler_classes[0] _lowerCAmelCase : Optional[int] = self.get_scheduler_config(variance_type='learned_range' ) _lowerCAmelCase : int = scheduler_class(**snake_case__ ) _lowerCAmelCase : int = 0.5 assert scheduler._get_variance(1 , predicted_variance=snake_case__ ) - -10.171_2790 < 1E-5 assert scheduler._get_variance(487 , predicted_variance=snake_case__ ) - -5.799_8052 < 1E-5 assert scheduler._get_variance(999 , predicted_variance=snake_case__ ) - -0.001_0011 < 1E-5 def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.scheduler_classes[0] _lowerCAmelCase : int = self.get_scheduler_config() _lowerCAmelCase : str = scheduler_class(**snake_case__ ) _lowerCAmelCase : List[Any] = scheduler.timesteps _lowerCAmelCase : List[str] = self.dummy_model() _lowerCAmelCase : int = self.dummy_sample_deter _lowerCAmelCase : Dict = torch.manual_seed(0 ) for i, t in enumerate(snake_case__ ): # 1. predict noise residual _lowerCAmelCase : List[str] = model(snake_case__ , snake_case__ ) # 2. predict previous mean of sample x_t-1 _lowerCAmelCase : List[str] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample _lowerCAmelCase : int = pred_prev_sample _lowerCAmelCase : List[str] = torch.sum(torch.abs(snake_case__ ) ) _lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 252.268_2495 ) < 1E-2 assert abs(result_mean.item() - 0.328_4743 ) < 1E-3 def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.scheduler_classes[0] _lowerCAmelCase : Optional[Any] = self.get_scheduler_config() _lowerCAmelCase : List[str] = scheduler_class(**snake_case__ ) scheduler.set_timesteps(25 ) _lowerCAmelCase : List[str] = scheduler.timesteps _lowerCAmelCase : List[str] = self.dummy_model() _lowerCAmelCase : List[Any] = self.dummy_sample_deter _lowerCAmelCase : Dict = torch.manual_seed(0 ) for i, t in enumerate(snake_case__ ): # 1. predict noise residual _lowerCAmelCase : Optional[Any] = model(snake_case__ , snake_case__ ) if i + 1 == timesteps.shape[0]: _lowerCAmelCase : List[Any] = None else: _lowerCAmelCase : str = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 _lowerCAmelCase : Optional[int] = scheduler.step( snake_case__ , snake_case__ , snake_case__ , prev_timestep=snake_case__ , generator=snake_case__ ).prev_sample _lowerCAmelCase : Any = pred_prev_sample _lowerCAmelCase : Union[str, Any] = torch.sum(torch.abs(snake_case__ ) ) _lowerCAmelCase : str = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 258.204_4983 ) < 1E-2 assert abs(result_mean.item() - 0.336_2038 ) < 1E-3 def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' pass
630
'''simple docstring''' from collections import Counter from timeit import timeit def lowercase (_A = "" , ): """simple docstring""" return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2 def lowercase (_A = "" ): """simple docstring""" if len(_A ) == 0: return True _lowerCAmelCase : Union[str, Any] = input_str.replace(' ' , '' ).lower() # character_freq_dict: Stores the frequency of every character in the input string _lowerCAmelCase : dict[str, int] = {} for character in lower_case_input_str: _lowerCAmelCase : Union[str, Any] = character_freq_dict.get(_A , 0 ) + 1 _lowerCAmelCase : List[Any] = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def lowercase (_A = "" ): """simple docstring""" print('\nFor string = ' , _A , ':' ) print( '> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_A ) , '\ttime =' , timeit( 'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , ) print( '> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_A ) , '\ttime =' , timeit( 'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , ) if __name__ == "__main__": lowerCAmelCase : Tuple = input( """Enter string to determine if it can be rearranged as a palindrome or not: """ ).strip() benchmark(check_str) lowerCAmelCase : Optional[Any] = can_string_be_rearranged_as_palindrome_counter(check_str) print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
630
1
'''simple docstring''' import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def a ( self ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(snake_case__ ): _lowerCAmelCase : Dict = AutoConfig.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) _lowerCAmelCase : Dict = FlaxAutoModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) @slow def a ( self ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: with self.subTest(snake_case__ ): _lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) _lowerCAmelCase : List[Any] = FlaxAutoModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) @slow def a ( self ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: _lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(snake_case__ ) _lowerCAmelCase : str = FlaxBertModel.from_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX ) @jax.jit def eval(**snake_case__ ): return model(**snake_case__ ) eval(**snake_case__ ).block_until_ready() @slow def a ( self ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: _lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(snake_case__ ) _lowerCAmelCase : Optional[int] = FlaxRobertaModel.from_pretrained(snake_case__ ) _lowerCAmelCase : str = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX ) @jax.jit def eval(**snake_case__ ): return model(**snake_case__ ) eval(**snake_case__ ).block_until_ready() def a ( self ): '''simple docstring''' with self.assertRaisesRegex( snake_case__ , 'bert-base is not a local folder and is not a valid model identifier' ): _lowerCAmelCase : Any = FlaxAutoModel.from_pretrained('bert-base' ) def a ( self ): '''simple docstring''' with self.assertRaisesRegex( snake_case__ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): _lowerCAmelCase : Union[str, Any] = FlaxAutoModel.from_pretrained(snake_case__ , revision='aaaaaa' ) def a ( self ): '''simple docstring''' with self.assertRaisesRegex( snake_case__ , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ): _lowerCAmelCase : Tuple = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' ) def a ( self ): '''simple docstring''' with self.assertRaisesRegex(snake_case__ , 'Use `from_pt=True` to load this model' ): _lowerCAmelCase : int = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
630
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase : str = logging.get_logger(__name__) lowerCAmelCase : int = { """facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""", } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "data2vec-text" def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ ) _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Tuple = hidden_size _lowerCAmelCase : Dict = num_hidden_layers _lowerCAmelCase : int = num_attention_heads _lowerCAmelCase : str = hidden_act _lowerCAmelCase : Any = intermediate_size _lowerCAmelCase : Any = hidden_dropout_prob _lowerCAmelCase : Optional[int] = attention_probs_dropout_prob _lowerCAmelCase : str = max_position_embeddings _lowerCAmelCase : Any = type_vocab_size _lowerCAmelCase : int = initializer_range _lowerCAmelCase : List[str] = layer_norm_eps _lowerCAmelCase : List[Any] = position_embedding_type _lowerCAmelCase : str = use_cache _lowerCAmelCase : Union[str, Any] = classifier_dropout class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" @property def a ( self ): '''simple docstring''' if self.task == "multiple-choice": _lowerCAmelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _lowerCAmelCase : List[Any] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
630
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : List[str] = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = { """funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""", """funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""", """funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""", """funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""", """funnel-transformer/intermediate""": ( """https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json""" ), """funnel-transformer/intermediate-base""": ( """https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json""" ), """funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""", """funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""", """funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""", """funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""", } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "funnel" __magic_name__ = { "hidden_size": "d_model", "num_attention_heads": "n_head", } def __init__( self , snake_case__=3_0522 , snake_case__=[4, 4, 4] , snake_case__=None , snake_case__=2 , snake_case__=768 , snake_case__=12 , snake_case__=64 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.1 , snake_case__=None , snake_case__=1E-9 , snake_case__="mean" , snake_case__="relative_shift" , snake_case__=True , snake_case__=True , snake_case__=True , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = vocab_size _lowerCAmelCase : List[Any] = block_sizes _lowerCAmelCase : int = [1] * len(snake_case__ ) if block_repeats is None else block_repeats assert len(snake_case__ ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." _lowerCAmelCase : Tuple = num_decoder_layers _lowerCAmelCase : List[Any] = d_model _lowerCAmelCase : Optional[int] = n_head _lowerCAmelCase : List[str] = d_head _lowerCAmelCase : Union[str, Any] = d_inner _lowerCAmelCase : List[str] = hidden_act _lowerCAmelCase : List[Any] = hidden_dropout _lowerCAmelCase : Union[str, Any] = attention_dropout _lowerCAmelCase : Optional[int] = activation_dropout _lowerCAmelCase : str = initializer_range _lowerCAmelCase : int = initializer_std _lowerCAmelCase : Dict = layer_norm_eps assert pooling_type in [ "mean", "max", ], F'Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.' _lowerCAmelCase : str = pooling_type assert attention_type in [ "relative_shift", "factorized", ], F'Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.' _lowerCAmelCase : List[Any] = attention_type _lowerCAmelCase : int = separate_cls _lowerCAmelCase : Union[str, Any] = truncate_seq _lowerCAmelCase : List[Any] = pool_q_only super().__init__(**snake_case__ ) @property def a ( self ): '''simple docstring''' return sum(self.block_sizes ) @num_hidden_layers.setter def a ( self , snake_case__ ): '''simple docstring''' raise NotImplementedError( 'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' ) @property def a ( self ): '''simple docstring''' return len(self.block_sizes ) @num_blocks.setter def a ( self , snake_case__ ): '''simple docstring''' raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
630
'''simple docstring''' import pytest import datasets # Import fixture modules as plugins lowerCAmelCase : List[str] = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""] def lowercase (_A , _A ): """simple docstring""" for item in items: if any(marker in item.keywords for marker in ['integration', 'unit'] ): continue item.add_marker(pytest.mark.unit ) def lowercase (_A ): """simple docstring""" config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' ) @pytest.fixture(autouse=_A ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : str = tmp_path_factory.getbasetemp() / 'cache' _lowerCAmelCase : Dict = test_hf_cache_home / 'datasets' _lowerCAmelCase : List[Any] = test_hf_cache_home / 'metrics' _lowerCAmelCase : List[Any] = test_hf_cache_home / 'modules' monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_A ) ) monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_A ) ) monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_A ) ) _lowerCAmelCase : Dict = test_hf_datasets_cache / 'downloads' monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_A ) ) _lowerCAmelCase : Union[str, Any] = test_hf_datasets_cache / 'downloads' / 'extracted' monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_A ) ) @pytest.fixture(autouse=_A , scope='session' ) def lowercase (): """simple docstring""" datasets.disable_progress_bar() @pytest.fixture(autouse=_A ) def lowercase (_A ): """simple docstring""" monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _A ) @pytest.fixture def lowercase (_A ): """simple docstring""" monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _A )
630
1
'''simple docstring''' import pytest import datasets # Import fixture modules as plugins lowerCAmelCase : List[str] = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""] def lowercase (_A , _A ): """simple docstring""" for item in items: if any(marker in item.keywords for marker in ['integration', 'unit'] ): continue item.add_marker(pytest.mark.unit ) def lowercase (_A ): """simple docstring""" config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' ) @pytest.fixture(autouse=_A ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : str = tmp_path_factory.getbasetemp() / 'cache' _lowerCAmelCase : Dict = test_hf_cache_home / 'datasets' _lowerCAmelCase : List[Any] = test_hf_cache_home / 'metrics' _lowerCAmelCase : List[Any] = test_hf_cache_home / 'modules' monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_A ) ) monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_A ) ) monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_A ) ) _lowerCAmelCase : Dict = test_hf_datasets_cache / 'downloads' monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_A ) ) _lowerCAmelCase : Union[str, Any] = test_hf_datasets_cache / 'downloads' / 'extracted' monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_A ) ) @pytest.fixture(autouse=_A , scope='session' ) def lowercase (): """simple docstring""" datasets.disable_progress_bar() @pytest.fixture(autouse=_A ) def lowercase (_A ): """simple docstring""" monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _A ) @pytest.fixture def lowercase (_A ): """simple docstring""" monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _A )
630
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig lowerCAmelCase : str = logging.get_logger(__name__) # General docstring lowerCAmelCase : Optional[Any] = """RegNetConfig""" # Base docstring lowerCAmelCase : int = """facebook/regnet-y-040""" lowerCAmelCase : Optional[Any] = [1, 10_88, 7, 7] # Image classification docstring lowerCAmelCase : Any = """facebook/regnet-y-040""" lowerCAmelCase : Optional[Any] = """tabby, tabby cat""" lowerCAmelCase : Tuple = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = 3 , snake_case__ = 1 , snake_case__ = 1 , snake_case__ = "relu" , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb _lowerCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) _lowerCAmelCase : List[Any] = tf.keras.layers.ConvaD( filters=snake_case__ , kernel_size=snake_case__ , strides=snake_case__ , padding='VALID' , groups=snake_case__ , use_bias=snake_case__ , name='convolution' , ) _lowerCAmelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) _lowerCAmelCase : int = ACTaFN[activation] if activation is not None else tf.identity def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = self.convolution(self.padding(snake_case__ ) ) _lowerCAmelCase : Union[str, Any] = self.normalization(snake_case__ ) _lowerCAmelCase : int = self.activation(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : str = config.num_channels _lowerCAmelCase : List[Any] = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = shape_list(snake_case__ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels=num_channels) _lowerCAmelCase : List[Any] = tf.transpose(snake_case__ , perm=(0, 2, 3, 1) ) _lowerCAmelCase : Tuple = self.embedder(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = 2 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = tf.keras.layers.ConvaD( filters=snake_case__ , kernel_size=1 , strides=snake_case__ , use_bias=snake_case__ , name='convolution' ) _lowerCAmelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) def a ( self , snake_case__ , snake_case__ = False ): '''simple docstring''' return self.normalization(self.convolution(snake_case__ ) , training=snake_case__ ) class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='pooler' ) _lowerCAmelCase : str = [ tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='relu' , name='attention.0' ), tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='sigmoid' , name='attention.2' ), ] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = self.pooler(snake_case__ ) for layer_module in self.attention: _lowerCAmelCase : Tuple = layer_module(snake_case__ ) _lowerCAmelCase : Optional[Any] = hidden_state * pooled return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Optional[int] = in_channels != out_channels or stride != 1 _lowerCAmelCase : Union[str, Any] = max(1 , out_channels // config.groups_width ) _lowerCAmelCase : Optional[Any] = ( TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. _lowerCAmelCase : Any = [ TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='layer.1' ), TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='layer.2' ), ] _lowerCAmelCase : List[str] = ACTaFN[config.hidden_act] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = hidden_state for layer_module in self.layers: _lowerCAmelCase : int = layer_module(snake_case__ ) _lowerCAmelCase : int = self.shortcut(snake_case__ ) hidden_state += residual _lowerCAmelCase : Tuple = self.activation(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : List[str] = in_channels != out_channels or stride != 1 _lowerCAmelCase : Union[str, Any] = max(1 , out_channels // config.groups_width ) _lowerCAmelCase : Optional[Any] = ( TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) _lowerCAmelCase : Tuple = [ TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='layer.1' ), TFRegNetSELayer(snake_case__ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ), TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='layer.3' ), ] _lowerCAmelCase : Tuple = ACTaFN[config.hidden_act] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = hidden_state for layer_module in self.layers: _lowerCAmelCase : List[Any] = layer_module(snake_case__ ) _lowerCAmelCase : Tuple = self.shortcut(snake_case__ ) hidden_state += residual _lowerCAmelCase : str = self.activation(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 2 , snake_case__ = 2 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Dict = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer _lowerCAmelCase : Optional[int] = [ # downsampling is done in the first layer with stride of 2 layer(snake_case__ , snake_case__ , snake_case__ , stride=snake_case__ , name='layers.0' ), *[layer(snake_case__ , snake_case__ , snake_case__ , name=F'layers.{i+1}' ) for i in range(depth - 1 )], ] def a ( self , snake_case__ ): '''simple docstring''' for layer_module in self.layers: _lowerCAmelCase : int = layer_module(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : str = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( snake_case__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) ) _lowerCAmelCase : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(snake_case__ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(snake_case__ , snake_case__ , snake_case__ , depth=snake_case__ , name=F'stages.{i+1}' ) ) def a ( self , snake_case__ , snake_case__ = False , snake_case__ = True ): '''simple docstring''' _lowerCAmelCase : List[Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _lowerCAmelCase : str = hidden_states + (hidden_state,) _lowerCAmelCase : List[str] = stage_module(snake_case__ ) if output_hidden_states: _lowerCAmelCase : Dict = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ ) @keras_serializable class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" __magic_name__ = RegNetConfig def __init__( self , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = config _lowerCAmelCase : Union[str, Any] = TFRegNetEmbeddings(snake_case__ , name='embedder' ) _lowerCAmelCase : Optional[int] = TFRegNetEncoder(snake_case__ , name='encoder' ) _lowerCAmelCase : Dict = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='pooler' ) @unpack_inputs def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = False , ): '''simple docstring''' _lowerCAmelCase : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase : int = self.embedder(snake_case__ , training=snake_case__ ) _lowerCAmelCase : List[str] = self.encoder( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ ) _lowerCAmelCase : List[Any] = encoder_outputs[0] _lowerCAmelCase : Tuple = self.pooler(snake_case__ ) # Change to NCHW output format to have uniformity in the modules _lowerCAmelCase : Optional[int] = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) _lowerCAmelCase : Optional[Any] = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: _lowerCAmelCase : Union[str, Any] = tuple([tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case__ , pooler_output=snake_case__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = RegNetConfig __magic_name__ = "regnet" __magic_name__ = "pixel_values" @property def a ( self ): '''simple docstring''' return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} lowerCAmelCase : List[Any] = r""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ lowerCAmelCase : Dict = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE_ , ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , *snake_case__ , **snake_case__ ) _lowerCAmelCase : List[str] = TFRegNetMainLayer(snake_case__ , name='regnet' ) @unpack_inputs @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__=False , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase : str = self.regnet( pixel_values=snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , SCREAMING_SNAKE_CASE_ , ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , *snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[Any] = config.num_labels _lowerCAmelCase : Optional[Any] = TFRegNetMainLayer(snake_case__ , name='regnet' ) # classification head _lowerCAmelCase : Optional[int] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a ( self , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__=False , ): '''simple docstring''' _lowerCAmelCase : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase : Dict = self.regnet( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ ) _lowerCAmelCase : Optional[Any] = outputs.pooler_output if return_dict else outputs[1] _lowerCAmelCase : List[Any] = self.classifier[0](snake_case__ ) _lowerCAmelCase : Tuple = self.classifier[1](snake_case__ ) _lowerCAmelCase : int = None if labels is None else self.hf_compute_loss(labels=snake_case__ , logits=snake_case__ ) if not return_dict: _lowerCAmelCase : str = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
630
1
'''simple docstring''' import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) lowerCAmelCase : List[str] = """bert-base-cased""" lowerCAmelCase : Union[str, Any] = """fp16""" lowerCAmelCase : Dict = """bf16""" lowerCAmelCase : Any = [FPaa, BFaa] @require_fsdp @require_cuda class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' super().setUp() _lowerCAmelCase : Optional[Any] = dict( ACCELERATE_USE_FSDP='true' , MASTER_ADDR='localhost' , MASTER_PORT='10999' , RANK='0' , LOCAL_RANK='0' , WORLD_SIZE='1' , ) def a ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(snake_case__ ): _lowerCAmelCase : Optional[int] = self.dist_env.copy() _lowerCAmelCase : Dict = F'{i + 1}' _lowerCAmelCase : Optional[Any] = strategy with mockenv_context(**snake_case__ ): _lowerCAmelCase : List[Any] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) ) def a ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(snake_case__ ): _lowerCAmelCase : Union[str, Any] = self.dist_env.copy() _lowerCAmelCase : List[Any] = prefetch_policy with mockenv_context(**snake_case__ ): _lowerCAmelCase : Any = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) ) def a ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(snake_case__ ): _lowerCAmelCase : Any = self.dist_env.copy() _lowerCAmelCase : Any = state_dict_type with mockenv_context(**snake_case__ ): _lowerCAmelCase : List[Any] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(snake_case__ ) for policy in FSDP_AUTO_WRAP_POLICY: _lowerCAmelCase : str = self.dist_env.copy() _lowerCAmelCase : List[Any] = policy if policy == "TRANSFORMER_BASED_WRAP": _lowerCAmelCase : List[str] = 'BertLayer' elif policy == "SIZE_BASED_WRAP": _lowerCAmelCase : Union[str, Any] = '2000' with mockenv_context(**snake_case__ ): _lowerCAmelCase : Dict = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(snake_case__ ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) _lowerCAmelCase : int = self.dist_env.copy() _lowerCAmelCase : Union[str, Any] = 'TRANSFORMER_BASED_WRAP' _lowerCAmelCase : List[str] = 'T5Layer' with mockenv_context(**snake_case__ ): _lowerCAmelCase : Optional[Any] = FullyShardedDataParallelPlugin() with self.assertRaises(snake_case__ ) as cm: fsdp_plugin.set_auto_wrap_policy(snake_case__ ) self.assertTrue('Could not find the transformer layer class to wrap in the model.' in str(cm.exception ) ) _lowerCAmelCase : str = self.dist_env.copy() _lowerCAmelCase : List[Any] = 'SIZE_BASED_WRAP' _lowerCAmelCase : Tuple = '0' with mockenv_context(**snake_case__ ): _lowerCAmelCase : Optional[Any] = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(snake_case__ ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def a ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: _lowerCAmelCase : Tuple = self.dist_env.copy() _lowerCAmelCase : List[Any] = mp_dtype with mockenv_context(**snake_case__ ): _lowerCAmelCase : Tuple = Accelerator() if mp_dtype == "fp16": _lowerCAmelCase : Dict = torch.floataa elif mp_dtype == "bf16": _lowerCAmelCase : List[str] = torch.bfloataa _lowerCAmelCase : Optional[Any] = MixedPrecision(param_dtype=snake_case__ , reduce_dtype=snake_case__ , buffer_dtype=snake_case__ ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , snake_case__ ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler , snake_case__ ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(snake_case__ ) def a ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: _lowerCAmelCase : List[Any] = self.dist_env.copy() _lowerCAmelCase : Union[str, Any] = str(snake_case__ ).lower() with mockenv_context(**snake_case__ ): _lowerCAmelCase : Tuple = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=snake_case__ ) ) @require_fsdp @require_multi_gpu @slow class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' super().setUp() _lowerCAmelCase : Any = 0.82 _lowerCAmelCase : Optional[int] = [ 'fsdp_shard_grad_op_transformer_based_wrap', 'fsdp_full_shard_transformer_based_wrap', ] _lowerCAmelCase : Any = { 'multi_gpu_fp16': 3200, 'fsdp_shard_grad_op_transformer_based_wrap_fp16': 2000, 'fsdp_full_shard_transformer_based_wrap_fp16': 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } _lowerCAmelCase : List[Any] = 160 _lowerCAmelCase : int = 160 _lowerCAmelCase : List[str] = inspect.getfile(accelerate.test_utils ) _lowerCAmelCase : List[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps'] ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = os.path.join(self.test_scripts_folder , 'test_performance.py' ) _lowerCAmelCase : str = ['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp'] for config in self.performance_configs: _lowerCAmelCase : Tuple = cmd.copy() for i, strategy in enumerate(snake_case__ ): if strategy.lower() in config: cmd_config.append(F'--fsdp_sharding_strategy={i+1}' ) break if "fp32" in config: cmd_config.append('--mixed_precision=no' ) else: cmd_config.append('--mixed_precision=fp16' ) if "cpu_offload" in config: cmd_config.append('--fsdp_offload_params=True' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(F'--fsdp_auto_wrap_policy={policy}' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('--fsdp_min_num_params=2000' ) cmd_config.extend( [ self.test_file_path, F'--output_dir={self.tmpdir}', F'--performance_lower_bound={self.performance_lower_bound}', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case__ , env=os.environ.copy() ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = os.path.join(self.test_scripts_folder , 'test_checkpointing.py' ) _lowerCAmelCase : Tuple = [ 'accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp', '--mixed_precision=fp16', '--fsdp_transformer_layer_cls_to_wrap=BertLayer', ] for i, strategy in enumerate(snake_case__ ): _lowerCAmelCase : Any = cmd.copy() cmd_config.append(F'--fsdp_sharding_strategy={i+1}' ) if strategy != "FULL_SHARD": continue _lowerCAmelCase : str = len(snake_case__ ) for state_dict_type in FSDP_STATE_DICT_TYPE: _lowerCAmelCase : List[str] = cmd_config[:state_dict_config_index] cmd_config.append(F'--fsdp_state_dict_type={state_dict_type}' ) cmd_config.extend( [ self.test_file_path, F'--output_dir={self.tmpdir}', '--partial_train_epoch=1', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case__ , env=os.environ.copy() ) _lowerCAmelCase : List[str] = cmd_config[:-1] _lowerCAmelCase : List[str] = os.path.join(self.tmpdir , 'epoch_0' ) cmd_config.extend( [ F'--resume_from_checkpoint={resume_from_checkpoint}', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case__ , env=os.environ.copy() ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = os.path.join(self.test_scripts_folder , 'test_peak_memory_usage.py' ) _lowerCAmelCase : List[Any] = [ 'accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): _lowerCAmelCase : Dict = cmd.copy() if "fp16" in spec: cmd_config.extend(['--mixed_precision=fp16'] ) else: cmd_config.extend(['--mixed_precision=no'] ) if "multi_gpu" in spec: continue else: cmd_config.extend(['--use_fsdp'] ) for i, strategy in enumerate(snake_case__ ): if strategy.lower() in spec: cmd_config.append(F'--fsdp_sharding_strategy={i+1}' ) break if "cpu_offload" in spec: cmd_config.append('--fsdp_offload_params=True' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(F'--fsdp_auto_wrap_policy={policy}' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('--fsdp_min_num_params=2000' ) cmd_config.extend( [ self.test_file_path, F'--output_dir={self.tmpdir}', F'--peak_memory_upper_bound={peak_mem_upper_bound}', F'--n_train={self.n_train}', F'--n_val={self.n_val}', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case__ , env=os.environ.copy() )
630
'''simple docstring''' from typing import Any def lowercase (_A ): """simple docstring""" if not input_list: return [] _lowerCAmelCase : Optional[int] = [input_list.count(_A ) for value in input_list] _lowerCAmelCase : int = max(_A ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(_A ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
630
1
'''simple docstring''' from __future__ import annotations import time import numpy as np lowerCAmelCase : List[str] = [8, 5, 9, 7] lowerCAmelCase : Union[str, Any] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] lowerCAmelCase : List[str] = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' _lowerCAmelCase : List[Any] = claim_vector _lowerCAmelCase : List[str] = allocated_resources_table _lowerCAmelCase : List[str] = maximum_claim_table def a ( self ): '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def a ( self ): '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def a ( self ): '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(snake_case__ ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def a ( self ): '''simple docstring''' return {self.__need().index(snake_case__ ): i for i in self.__need()} def a ( self , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.__need() _lowerCAmelCase : Tuple = self.__allocated_resources_table _lowerCAmelCase : str = self.__available_resources() _lowerCAmelCase : Union[str, Any] = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('_' * 50 + '\n' ) while need_list: _lowerCAmelCase : int = False for each_need in need_list: _lowerCAmelCase : int = True for index, need in enumerate(snake_case__ ): if need > available_resources[index]: _lowerCAmelCase : Dict = False break if execution: _lowerCAmelCase : Tuple = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: _lowerCAmelCase : List[Any] = original_need_index print(F'Process {process_number + 1} is executing.' ) # remove the process run from stack need_list.remove(snake_case__ ) # update available/freed resources stack _lowerCAmelCase : List[str] = np.array(snake_case__ ) + np.array( alloc_resources_table[process_number] ) print( 'Updated available resource stack for processes: ' + ' '.join([str(snake_case__ ) for x in available_resources] ) ) break if safe: print('The process is in a safe state.\n' ) else: print('System in unsafe state. Aborting...\n' ) break def a ( self ): '''simple docstring''' print(' ' * 9 + 'Allocated Resource Table' ) for item in self.__allocated_resources_table: print( F'P{self.__allocated_resources_table.index(snake_case__ ) + 1}' + ' '.join(F'{it:>8}' for it in item ) + '\n' ) print(' ' * 9 + 'System Resource Table' ) for item in self.__maximum_claim_table: print( F'P{self.__maximum_claim_table.index(snake_case__ ) + 1}' + ' '.join(F'{it:>8}' for it in item ) + '\n' ) print( 'Current Usage by Active Processes: ' + ' '.join(str(snake_case__ ) for x in self.__claim_vector ) ) print( 'Initial Available Resources: ' + ' '.join(str(snake_case__ ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
630
'''simple docstring''' from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
630
1
'''simple docstring''' from __future__ import annotations def lowercase (_A , _A ): """simple docstring""" print(f'Vertex\tShortest Distance from vertex {src}' ) for i, d in enumerate(_A ): print(f'{i}\t\t{d}' ) def lowercase (_A , _A , _A ): """simple docstring""" for j in range(_A ): _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = (graph[j][k] for k in ['src', 'dst', 'weight']) if distance[u] != float('inf' ) and distance[u] + w < distance[v]: return True return False def lowercase (_A , _A , _A , _A ): """simple docstring""" _lowerCAmelCase : Tuple = [float('inf' )] * vertex_count _lowerCAmelCase : Dict = 0.0 for _ in range(vertex_count - 1 ): for j in range(_A ): _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = (graph[j][k] for k in ['src', 'dst', 'weight']) if distance[u] != float('inf' ) and distance[u] + w < distance[v]: _lowerCAmelCase : str = distance[u] + w _lowerCAmelCase : int = check_negative_cycle(_A , _A , _A ) if negative_cycle_exists: raise Exception('Negative cycle found' ) return distance if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase : Optional[int] = int(input("""Enter number of vertices: """).strip()) lowerCAmelCase : Tuple = int(input("""Enter number of edges: """).strip()) lowerCAmelCase : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print("""Edge """, i + 1) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = ( int(x) for x in input("""Enter source, destination, weight: """).strip().split(""" """) ) lowerCAmelCase : Tuple = {"""src""": src, """dst""": dest, """weight""": weight} lowerCAmelCase : List[str] = int(input("""\nEnter shortest path source:""").strip()) lowerCAmelCase : str = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
630
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """spiece.model"""} lowerCAmelCase : Optional[int] = { """vocab_file""": { """AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""", } } lowerCAmelCase : Union[str, Any] = { """AI-Sweden/gpt-sw3-126m""": 20_48, """AI-Sweden/gpt-sw3-350m""": 20_48, """AI-Sweden/gpt-sw3-1.6b""": 20_48, """AI-Sweden/gpt-sw3-6.7b""": 20_48, """AI-Sweden/gpt-sw3-20b""": 20_48, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] def __init__( self , snake_case__ , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = None , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs _lowerCAmelCase : List[Any] = kwargs.get('name_or_path' ) if name_or_path is None: logger.warning( 'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,' ' you are testing the model, this can safely be ignored' ) _lowerCAmelCase : Any = 'None' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing _lowerCAmelCase : str = '<|endoftext|>' if eos_token is None else eos_token _lowerCAmelCase : Tuple = '<unk>' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: _lowerCAmelCase : List[str] = unk_token if pad_token is None else pad_token _lowerCAmelCase : Optional[int] = eos_token if bos_token is None else bos_token else: _lowerCAmelCase : Tuple = '<pad>' if pad_token is None else pad_token _lowerCAmelCase : Union[str, Any] = '<s>' if bos_token is None else bos_token super().__init__( do_lower_case=snake_case__ , remove_space=snake_case__ , keep_accents=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , ) _lowerCAmelCase : Union[str, Any] = do_lower_case _lowerCAmelCase : Optional[int] = remove_space _lowerCAmelCase : Any = keep_accents _lowerCAmelCase : Optional[int] = vocab_file _lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(snake_case__ ) # Used for whitespace normalization in input texts # fmt : off _lowerCAmelCase : Optional[Any] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', '„'} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing _lowerCAmelCase : Optional[Any] = re.compile( F'[{"".join(map(snake_case__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' ) def __getstate__( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.__dict__.copy() _lowerCAmelCase : int = None return state def __setstate__( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): _lowerCAmelCase : int = {} _lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def a ( self ): '''simple docstring''' return len(self.sp_model ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.non_printing_characters_re.sub('' , snake_case__ ) # Normalize whitespaces _lowerCAmelCase : Tuple = ''.join([char if char not in self.whitespaces else ' ' for char in text] ) # NFC Unicode normalization _lowerCAmelCase : Union[str, Any] = unicodedata.normalize('NFC' , snake_case__ ) return text def a ( self , snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = self.preprocess_text(snake_case__ ) return self.sp_model.encode(snake_case__ , out_type=snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' return self.sp_model.PieceToId(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' return self.sp_model.IdToPiece(snake_case__ ) @staticmethod def a ( snake_case__ ): '''simple docstring''' return out_string def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = [] _lowerCAmelCase : Optional[Any] = '' _lowerCAmelCase : Tuple = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(snake_case__ ) + token _lowerCAmelCase : Union[str, Any] = True _lowerCAmelCase : List[Any] = [] else: current_sub_tokens.append(snake_case__ ) _lowerCAmelCase : List[Any] = False out_string += self.sp_model.decode(snake_case__ ) return out_string def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return _lowerCAmelCase : int = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case__ ) elif not os.path.isfile(self.vocab_file ): with open(snake_case__ , 'wb' ) as fi: _lowerCAmelCase : Any = self.sp_model.serialized_model_proto() fi.write(snake_case__ ) return (out_vocab_file,) def a ( self , snake_case__ , snake_case__ = False ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): _lowerCAmelCase : Optional[Any] = self.preprocess_text(snake_case__ ) _lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ ) else: _lowerCAmelCase : Tuple = [self.preprocess_text(snake_case__ ) for t in text] _lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ ) if return_tensors is True or return_tensors == "pt": _lowerCAmelCase : int = torch.tensor(snake_case__ ) return token_ids def a ( self , snake_case__ ): '''simple docstring''' return self.sp_model.decode(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()] _lowerCAmelCase : str = ( F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(snake_case__ ) + F'{self.bos_token}Bot:' ) return self.encode(text=snake_case__ )
630
1
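The GPT-SW3 tokenizer sample above folds three preprocessing ideas into its preprocess_text method: stripping non-printing control characters, mapping exotic whitespace variants to a plain space, and NFC Unicode normalization. Here is a minimal standalone sketch of that pipeline; the whitespace set is an illustrative assumption, not the model's exact one.

import re
import unicodedata

# Same construction as the tokenizer's regex: control chars, C1 block, NBSP,
# soft hyphen, and zero-width space are removed outright.
NON_PRINTING_RE = re.compile(
    f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
)
WHITESPACES = {"\u2009", "\u200a", "\u202f", "\u3000"}  # assumed subset

def preprocess_text(text: str) -> str:
    text = NON_PRINTING_RE.sub("", text)                            # drop non-printing chars
    text = "".join(" " if ch in WHITESPACES else ch for ch in text)  # normalize whitespace
    return unicodedata.normalize("NFC", text)                        # NFC normalization

print(preprocess_text("hello\u2009world\u200b"))  # -> "hello world"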
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = (DDPMScheduler,) def a ( self , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**snake_case__ ) return config def a ( self ): '''simple docstring''' for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=snake_case__ ) def a ( self ): '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ ) def a ( self ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=snake_case__ ) def a ( self ): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=snake_case__ ) def a ( self ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=snake_case__ ) def a ( self ): '''simple docstring''' self.check_over_configs(thresholding=snake_case__ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , ) def a ( self ): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=snake_case__ ) def a ( self ): '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.scheduler_classes[0] _lowerCAmelCase : Optional[Any] = self.get_scheduler_config() _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.scheduler_classes[0] _lowerCAmelCase : Optional[Any] = self.get_scheduler_config() _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = len(snake_case__ ) _lowerCAmelCase : str = self.dummy_model() _lowerCAmelCase : str = self.dummy_sample_deter _lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 ) for t in reversed(range(snake_case__ ) ): # 1. predict noise residual _lowerCAmelCase : List[Any] = model(snake_case__ , snake_case__ ) # 2. 
predict previous mean of sample x_t-1 _lowerCAmelCase : Any = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance _lowerCAmelCase : Dict = pred_prev_sample _lowerCAmelCase : Dict = torch.sum(torch.abs(snake_case__ ) ) _lowerCAmelCase : List[str] = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.scheduler_classes[0] _lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='v_prediction' ) _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = len(snake_case__ ) _lowerCAmelCase : Any = self.dummy_model() _lowerCAmelCase : Tuple = self.dummy_sample_deter _lowerCAmelCase : Optional[int] = torch.manual_seed(0 ) for t in reversed(range(snake_case__ ) ): # 1. predict noise residual _lowerCAmelCase : Union[str, Any] = model(snake_case__ , snake_case__ ) # 2. predict previous mean of sample x_t-1 _lowerCAmelCase : Dict = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance _lowerCAmelCase : Tuple = pred_prev_sample _lowerCAmelCase : Any = torch.sum(torch.abs(snake_case__ ) ) _lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.scheduler_classes[0] _lowerCAmelCase : Optional[int] = self.get_scheduler_config() _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=snake_case__ ) _lowerCAmelCase : Union[str, Any] = scheduler.timesteps for i, timestep in enumerate(snake_case__ ): if i == len(snake_case__ ) - 1: _lowerCAmelCase : str = -1 else: _lowerCAmelCase : Optional[Any] = timesteps[i + 1] _lowerCAmelCase : int = scheduler.previous_timestep(snake_case__ ) _lowerCAmelCase : int = prev_t.item() self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.scheduler_classes[0] _lowerCAmelCase : Tuple = self.get_scheduler_config() _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = [100, 87, 50, 51, 0] with self.assertRaises(snake_case__ , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.scheduler_classes[0] _lowerCAmelCase : List[str] = self.get_scheduler_config() _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = [100, 87, 50, 1, 0] _lowerCAmelCase : int = len(snake_case__ ) with self.assertRaises(snake_case__ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' 
): scheduler.set_timesteps(num_inference_steps=snake_case__ , timesteps=snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.scheduler_classes[0] _lowerCAmelCase : int = self.get_scheduler_config() _lowerCAmelCase : Any = scheduler_class(**snake_case__ ) _lowerCAmelCase : Any = [scheduler.config.num_train_timesteps] with self.assertRaises( snake_case__ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=snake_case__ )
630
1
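The scheduler tests above repeatedly exercise DDPMScheduler.step inside a reverse-diffusion loop. A minimal sketch of that loop, assuming a recent diffusers API; the "model" here is a stand-in that predicts zero noise, where a real pipeline would call a trained UNet.

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 3, 8, 8)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # stand-in for a UNet noise prediction
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])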
def trapezoidal_rule(boundary, steps):
    """Extended trapezoidal rule: approximate the integral of f over
    [boundary[0], boundary[1]] using the given number of steps."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    y = 0.0
    y += (h / 2.0) * f(a)
    for x in make_points(a, b, h):
        y += h * f(x)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a + h, a + 2h, ..., b - h."""
    x = a + h
    while x < b - h / 2:  # half-step guard so round-off cannot drop the b - h node
        yield x
        x = x + h


def f(x):  # enter your function here
    return x * x


def main():
    a = 0.0       # lower bound of integration
    b = 1.0       # upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]
    y = trapezoidal_rule(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
630
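A quick accuracy check for the sample above: the exact integral of x^2 on [0, 1] is 1/3, and the composite trapezoidal error shrinks quadratically with the step size. This assumes the trapezoidal_rule and f definitions above are in scope.

approx = trapezoidal_rule([0.0, 1.0], 1000)  # f(x) = x**2 as defined above
assert abs(approx - 1.0 / 3.0) < 1e-5        # error ~ h^2 / 12 * (b - a) * max|f''|
print(approx)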
import socket


def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
630
1
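The sample above is only the receiving half of a file transfer. A hypothetical companion server, sketched under the assumption that it runs on the same host and port (12312) and serves a single client once; the filename is illustrative.

import socket

def send_file(filename: str = "mytext.txt", port: int = 12312) -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind((socket.gethostname(), port))
    sock.listen(5)
    conn, addr = sock.accept()
    print(f"Connected to {addr}")
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as in_file:  # file must exist on the server side
        data = in_file.read(1024)
        while data:
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    sock.close()

if __name__ == "__main__":
    send_file()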
'''simple docstring''' from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Optional[int] = parent _lowerCAmelCase : Any = 13 _lowerCAmelCase : Dict = 7 _lowerCAmelCase : Any = True _lowerCAmelCase : Union[str, Any] = True _lowerCAmelCase : Union[str, Any] = False _lowerCAmelCase : List[Any] = True _lowerCAmelCase : int = 99 _lowerCAmelCase : int = 32 _lowerCAmelCase : List[Any] = 2 _lowerCAmelCase : List[Any] = 4 _lowerCAmelCase : Dict = 37 _lowerCAmelCase : str = 'gelu' _lowerCAmelCase : str = 0.1 _lowerCAmelCase : Any = 0.1 _lowerCAmelCase : Union[str, Any] = 512 _lowerCAmelCase : int = 16 _lowerCAmelCase : List[Any] = 2 _lowerCAmelCase : Optional[int] = 0.02 _lowerCAmelCase : int = 3 _lowerCAmelCase : Optional[int] = 4 _lowerCAmelCase : str = None def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase : List[Any] = None if self.use_input_mask: _lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase : List[Any] = None _lowerCAmelCase : Union[str, Any] = None _lowerCAmelCase : str = None if self.use_labels: _lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase : str = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = TFDistilBertModel(config=snake_case__ ) _lowerCAmelCase : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask} _lowerCAmelCase : List[str] = model(snake_case__ ) _lowerCAmelCase : List[str] = [input_ids, input_mask] _lowerCAmelCase : Tuple = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Tuple = TFDistilBertForMaskedLM(config=snake_case__ ) _lowerCAmelCase : List[Any] = {'input_ids': input_ids, 'attention_mask': 
input_mask} _lowerCAmelCase : List[Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = TFDistilBertForQuestionAnswering(config=snake_case__ ) _lowerCAmelCase : Tuple = { 'input_ids': input_ids, 'attention_mask': input_mask, } _lowerCAmelCase : Union[str, Any] = model(snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.num_labels _lowerCAmelCase : List[str] = TFDistilBertForSequenceClassification(snake_case__ ) _lowerCAmelCase : int = {'input_ids': input_ids, 'attention_mask': input_mask} _lowerCAmelCase : Any = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.num_choices _lowerCAmelCase : Optional[Any] = TFDistilBertForMultipleChoice(snake_case__ ) _lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) ) _lowerCAmelCase : List[Any] = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) ) _lowerCAmelCase : List[str] = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, } _lowerCAmelCase : int = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.num_labels _lowerCAmelCase : Dict = TFDistilBertForTokenClassification(snake_case__ ) _lowerCAmelCase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask} _lowerCAmelCase : List[str] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.prepare_config_and_inputs() ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : str = config_and_inputs _lowerCAmelCase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) __magic_name__ = ( { "feature-extraction": TFDistilBertModel, "fill-mask": TFDistilBertForMaskedLM, "question-answering": TFDistilBertForQuestionAnswering, "text-classification": TFDistilBertForSequenceClassification, "token-classification": TFDistilBertForTokenClassification, "zero-shot": TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = 
TFDistilBertModelTester(self ) _lowerCAmelCase : Any = ConfigTester(self , config_class=snake_case__ , dim=37 ) def a ( self ): '''simple docstring''' self.config_tester.run_common_tests() def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*snake_case__ ) @slow def a ( self ): '''simple docstring''' for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): _lowerCAmelCase : Any = TFDistilBertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @require_tf class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = TFDistilBertModel.from_pretrained('distilbert-base-uncased' ) _lowerCAmelCase : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] ) _lowerCAmelCase : Dict = model(snake_case__ )[0] _lowerCAmelCase : int = [1, 6, 768] self.assertEqual(output.shape , snake_case__ ) _lowerCAmelCase : List[Any] = tf.constant( [ [ [0.1926_1885, -0.1373_2955, 0.411_9799], [0.2215_0156, -0.0742_2661, 0.3903_7204], [0.2275_6018, -0.089_6414, 0.370_1467], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , snake_case__ , atol=1E-4 )
630
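Stripped of the test harness, the @slow integration test at the end of the DistilBERT sample above amounts to the following few lines. This mirrors that test directly; running it needs network access to download the distilbert-base-uncased checkpoint.

import tensorflow as tf
from transformers import TFDistilBertModel

model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
output = model(input_ids)[0]
print(output.shape)  # (1, 6, 768): batch, sequence length, hidden size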
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
630
1
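The key-renaming logic in the conversion script above, reduced to plain dictionaries so it can be checked without any checkpoint on disk; the two entries are taken from the script's own mapping.

RENAMES = {"num_res_blocks": "layers_per_block", "image_size": "sample_size"}

old_config = {"num_res_blocks": 2, "image_size": 64, "act_fn": "silu"}
new_config = {RENAMES.get(k, k): v for k, v in old_config.items()}
assert new_config == {"layers_per_block": 2, "sample_size": 64, "act_fn": "silu"}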
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = KandinskyVaaInpaintPipeline __magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"] __magic_name__ = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] __magic_name__ = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __magic_name__ = False @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return self.time_input_dim @property def a ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def a ( self ): '''simple docstring''' return 100 @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Union[str, Any] = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } _lowerCAmelCase : str = UNetaDConditionModel(**snake_case__ ) return model @property def a ( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Tuple = VQModel(**self.dummy_movq_kwargs ) return model def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.dummy_unet _lowerCAmelCase : Any = self.dummy_movq _lowerCAmelCase : int = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , ) _lowerCAmelCase : Optional[int] = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def a ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : Dict = floats_tensor((1, 
self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case__ ) # create init_image _lowerCAmelCase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase : List[Any] = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) ) # create mask _lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa ) _lowerCAmelCase : List[str] = 0 if str(snake_case__ ).startswith('mps' ): _lowerCAmelCase : Union[str, Any] = torch.manual_seed(snake_case__ ) else: _lowerCAmelCase : Dict = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) _lowerCAmelCase : int = { 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = 'cpu' _lowerCAmelCase : List[Any] = self.get_dummy_components() _lowerCAmelCase : int = self.pipeline_class(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) ) _lowerCAmelCase : str = output.images _lowerCAmelCase : int = pipe( **self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0] _lowerCAmelCase : Any = image[0, -3:, -3:, -1] _lowerCAmelCase : Tuple = image_from_tuple[0, -3:, -3:, -1] print(F'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : Optional[Any] = np.array( [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def a ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' ) _lowerCAmelCase : Union[str, Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) _lowerCAmelCase : List[Any] = np.ones((768, 768) , dtype=np.floataa ) _lowerCAmelCase : Union[str, Any] = 0 _lowerCAmelCase : Union[str, Any] = 'a hat' _lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(snake_case__ ) _lowerCAmelCase : Tuple = KandinskyVaaInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa ) _lowerCAmelCase : Any = pipeline.to(snake_case__ ) pipeline.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : int = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase , _lowerCAmelCase : 
Optional[int] = pipe_prior( snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() _lowerCAmelCase : Dict = pipeline( image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) _lowerCAmelCase : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case__ , snake_case__ )
630
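The inpainting tests above build their mask in get_dummy_inputs as a float array of ones with a zeroed-out region. The same array mechanics in isolation (which value means "repaint" versus "keep" depends on the pipeline version, so this shows only the construction, not the semantics):

import numpy as np

mask = np.ones((64, 64), dtype=np.float32)
mask[0] = 0          # zero out the first row, as in the test
print(mask.shape, mask.min(), mask.max())  # (64, 64) 0.0 1.0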
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
630
1
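What the padding step in the layer above produces, sketched with plain TensorFlow ops instead of tensorflow_text.pad_model_inputs. Note the naive mask here would misclassify a real token whose id equals the pad id, which is why the layer computes the mask from the ragged output before padding.

import tensorflow as tf

token_ids = tf.ragged.constant([[5, 7, 9], [3]])
max_length, pad_id = 5, 0
input_ids = token_ids.to_tensor(default_value=pad_id, shape=(None, max_length))
attention_mask = tf.cast(input_ids != pad_id, tf.int32)
print(input_ids.numpy())       # [[5 7 9 0 0], [3 0 0 0 0]]
print(attention_mask.numpy())  # [[1 1 1 0 0], [1 0 0 0 0]]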
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
630
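The sample above relies on torch's JIT extension builder. The same mechanism in its smallest documented form, using load_inline with a hypothetical one-function C++ source; compiling it requires a local C++ toolchain.

import torch
from torch.utils.cpp_extension import load_inline

# load_inline prepends #include <torch/extension.h> and generates the
# pybind11 bindings for the listed functions automatically.
cpp_source = "torch::Tensor double_it(torch::Tensor x) { return x + x; }"
ext = load_inline(name="toy_ext", cpp_sources=[cpp_source], functions=["double_it"])
print(ext.double_it(torch.ones(2)))  # tensor([2., 2.])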
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
630
1
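The _LazyModule pattern in the sample above defers submodule imports until a name is first accessed. The stdlib mechanism it builds on is module-level __getattr__ (PEP 562); a minimal sketch, with json standing in for a heavy submodule — save it as lazy_mod.py and access names via `import lazy_mod; lazy_mod.dumps`.

import importlib

_import_structure = {"json": ["dumps", "loads"]}  # submodule -> exported names

def __getattr__(name):
    # Called only when `name` is not found in this module's globals.
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(module_name)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")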
'''simple docstring''' from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"] @register_to_config def __init__( self , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = 5_0257 , snake_case__ = 1024 , snake_case__ = 768 , snake_case__ = 12 , snake_case__ = 12 , snake_case__ = None , snake_case__ = "gelu_new" , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 1E-5 , snake_case__ = 0.02 , snake_case__ = True , snake_case__ = True , snake_case__ = False , snake_case__ = False , ): '''simple docstring''' super().__init__() _lowerCAmelCase : Any = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( F'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and' F' `n_embd`: {n_embd} are not equal.' ) _lowerCAmelCase : Any = prefix_inner_dim _lowerCAmelCase : Any = prefix_hidden_dim _lowerCAmelCase : str = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) _lowerCAmelCase : Optional[Any] = ( nn.Linear(self.prefix_hidden_dim , snake_case__ ) if self.prefix_hidden_dim is not None else nn.Identity() ) _lowerCAmelCase : Union[str, Any] = GPTaConfig( vocab_size=snake_case__ , n_positions=snake_case__ , n_embd=snake_case__ , n_layer=snake_case__ , n_head=snake_case__ , n_inner=snake_case__ , activation_function=snake_case__ , resid_pdrop=snake_case__ , embd_pdrop=snake_case__ , attn_pdrop=snake_case__ , layer_norm_epsilon=snake_case__ , initializer_range=snake_case__ , scale_attn_weights=snake_case__ , use_cache=snake_case__ , scale_attn_by_inverse_layer_idx=snake_case__ , reorder_and_upcast_attn=snake_case__ , ) _lowerCAmelCase : List[str] = GPTaLMHeadModel(snake_case__ ) def a ( self , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , ): '''simple docstring''' _lowerCAmelCase : str = self.transformer.transformer.wte(snake_case__ ) _lowerCAmelCase : str = self.encode_prefix(snake_case__ ) _lowerCAmelCase : Tuple = self.decode_prefix(snake_case__ ) _lowerCAmelCase : Optional[int] = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: _lowerCAmelCase : List[str] = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) _lowerCAmelCase : Union[str, Any] = torch.cat((dummy_token, input_ids) , dim=1 ) _lowerCAmelCase : int = self.transformer(inputs_embeds=snake_case__ , labels=snake_case__ , attention_mask=snake_case__ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' return torch.zeros(snake_case__ , self.prefix_length , dtype=torch.intaa , device=snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' return self.encode_prefix(snake_case__ ) @torch.no_grad() def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = torch.split(snake_case__ , 1 , dim=0 ) _lowerCAmelCase : Dict = [] _lowerCAmelCase : str = [] for feature in features: _lowerCAmelCase : List[Any] = 
self.decode_prefix(feature.to(snake_case__ ) ) # back to the clip feature # Only support beam search for now _lowerCAmelCase , _lowerCAmelCase : List[Any] = self.generate_beam( input_embeds=snake_case__ , device=snake_case__ , eos_token_id=snake_case__ ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) _lowerCAmelCase : Tuple = torch.stack(snake_case__ ) _lowerCAmelCase : List[Any] = torch.stack(snake_case__ ) return generated_tokens, generated_seq_lengths @torch.no_grad() def a ( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = 5 , snake_case__ = 67 , snake_case__ = 1.0 , snake_case__ = None , ): '''simple docstring''' _lowerCAmelCase : List[Any] = eos_token_id _lowerCAmelCase : List[Any] = None _lowerCAmelCase : Optional[int] = None _lowerCAmelCase : Optional[Any] = torch.ones(snake_case__ , device=snake_case__ , dtype=torch.int ) _lowerCAmelCase : Optional[Any] = torch.zeros(snake_case__ , device=snake_case__ , dtype=torch.bool ) if input_embeds is not None: _lowerCAmelCase : int = input_embeds else: _lowerCAmelCase : List[Any] = self.transformer.transformer.wte(snake_case__ ) for i in range(snake_case__ ): _lowerCAmelCase : str = self.transformer(inputs_embeds=snake_case__ ) _lowerCAmelCase : Tuple = outputs.logits _lowerCAmelCase : Optional[Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) _lowerCAmelCase : Any = logits.softmax(-1 ).log() if scores is None: _lowerCAmelCase , _lowerCAmelCase : Dict = logits.topk(snake_case__ , -1 ) _lowerCAmelCase : Optional[Any] = generated.expand(snake_case__ , *generated.shape[1:] ) _lowerCAmelCase , _lowerCAmelCase : Dict = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: _lowerCAmelCase : Dict = next_tokens else: _lowerCAmelCase : str = tokens.expand(snake_case__ , *tokens.shape[1:] ) _lowerCAmelCase : Optional[int] = torch.cat((tokens, next_tokens) , dim=1 ) else: _lowerCAmelCase : Any = -float(np.inf ) _lowerCAmelCase : Union[str, Any] = 0 _lowerCAmelCase : int = scores[:, None] + logits seq_lengths[~is_stopped] += 1 _lowerCAmelCase : Any = scores_sum / seq_lengths[:, None] _lowerCAmelCase , _lowerCAmelCase : List[Any] = scores_sum_average.view(-1 ).topk(snake_case__ , -1 ) _lowerCAmelCase : List[str] = next_tokens // scores_sum.shape[1] _lowerCAmelCase : Tuple = seq_lengths[next_tokens_source] _lowerCAmelCase : Optional[int] = next_tokens % scores_sum.shape[1] _lowerCAmelCase : Dict = next_tokens.unsqueeze(1 ) _lowerCAmelCase : Dict = tokens[next_tokens_source] _lowerCAmelCase : Dict = torch.cat((tokens, next_tokens) , dim=1 ) _lowerCAmelCase : Optional[int] = generated[next_tokens_source] _lowerCAmelCase : Optional[int] = scores_sum_average * seq_lengths _lowerCAmelCase : List[Any] = is_stopped[next_tokens_source] _lowerCAmelCase : Any = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) _lowerCAmelCase : Dict = torch.cat((generated, next_token_embed) , dim=1 ) _lowerCAmelCase : Optional[Any] = is_stopped + next_tokens.eq(snake_case__ ).squeeze() if is_stopped.all(): break _lowerCAmelCase : Dict = scores / seq_lengths _lowerCAmelCase : List[str] = scores.argsort(descending=snake_case__ ) # tokens tensors are already padded to max_seq_length _lowerCAmelCase : Optional[int] = [tokens[i] for i in order] _lowerCAmelCase : Optional[int] = torch.stack(snake_case__ , dim=0 ) _lowerCAmelCase : str = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
630
'''simple docstring''' lowerCAmelCase : Optional[int] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)] def lowercase (_A ): """simple docstring""" _lowerCAmelCase : str = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0] number //= 1_0_0_0_0_0 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution lowerCAmelCase : list[bool | None] = [None] * 10_00_00_00 lowerCAmelCase : List[str] = True lowerCAmelCase : Union[str, Any] = False def lowercase (_A ): """simple docstring""" if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore _lowerCAmelCase : Any = chain(next_number(_A ) ) _lowerCAmelCase : List[str] = number_chain while number < 1_0_0_0_0_0_0_0: _lowerCAmelCase : Tuple = number_chain number *= 1_0 return number_chain def lowercase (_A = 1_0_0_0_0_0_0_0 ): """simple docstring""" for i in range(1 , _A ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(_A ) if __name__ == "__main__": import doctest doctest.testmod() print(F'''{solution() = }''')
630
1
'''simple docstring''' import argparse import torch from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def lowercase (_A , _A , _A ): """simple docstring""" if openai_config_file == "": _lowerCAmelCase : str = OpenAIGPTConfig() else: _lowerCAmelCase : List[str] = OpenAIGPTConfig.from_json_file(_A ) _lowerCAmelCase : Union[str, Any] = OpenAIGPTModel(_A ) # Load weights from numpy load_tf_weights_in_openai_gpt(_A , _A , _A ) # Save pytorch-model _lowerCAmelCase : str = pytorch_dump_folder_path + '/' + WEIGHTS_NAME _lowerCAmelCase : int = pytorch_dump_folder_path + '/' + CONFIG_NAME print(f'Save PyTorch model to {pytorch_weights_dump_path}' ) torch.save(model.state_dict() , _A ) print(f'Save configuration file to {pytorch_config_dump_path}' ) with open(_A , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--openai_checkpoint_folder_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--openai_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained OpenAI model. \n""" """This specifies the model architecture.""" ), ) lowerCAmelCase : Optional[Any] = parser.parse_args() convert_openai_checkpoint_to_pytorch( args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path )
630
'''simple docstring''' import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(snake_case__ , 'width_multiplier' ) ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=64 , snake_case__=2 , snake_case__=3 , snake_case__="swish" , snake_case__=3 , snake_case__=32 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=True , snake_case__=True , snake_case__=10 , snake_case__=None , snake_case__=0.25 , snake_case__=0.0 , snake_case__=0.0 , ): '''simple docstring''' _lowerCAmelCase : Tuple = parent _lowerCAmelCase : Optional[int] = batch_size _lowerCAmelCase : List[Any] = image_size _lowerCAmelCase : List[Any] = patch_size _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 ) _lowerCAmelCase : Optional[Any] = hidden_act _lowerCAmelCase : List[Any] = conv_kernel_size _lowerCAmelCase : Optional[Any] = output_stride _lowerCAmelCase : List[Any] = classifier_dropout_prob _lowerCAmelCase : str = use_labels _lowerCAmelCase : List[str] = is_training _lowerCAmelCase : Optional[int] = num_labels _lowerCAmelCase : List[str] = initializer_range _lowerCAmelCase : str = scope _lowerCAmelCase : Any = width_multiplier _lowerCAmelCase : Union[str, Any] = ffn_dropout _lowerCAmelCase : Optional[int] = attn_dropout def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase : Optional[Any] = None _lowerCAmelCase : Dict = None if self.use_labels: _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _lowerCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def a ( self ): '''simple docstring''' return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = MobileViTVaModel(config=snake_case__ ) model.to(snake_case__ ) 
model.eval() _lowerCAmelCase : str = model(snake_case__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.num_labels _lowerCAmelCase : List[Any] = MobileViTVaForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.num_labels _lowerCAmelCase : Optional[int] = MobileViTVaForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Dict = model(snake_case__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) _lowerCAmelCase : Any = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = config_and_inputs _lowerCAmelCase : Tuple = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) __magic_name__ = ( { "feature-extraction": MobileViTVaModel, "image-classification": MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def a ( self ): '''simple docstring''' _lowerCAmelCase : int = MobileViTVaModelTester(self ) _lowerCAmelCase : Dict = MobileViTVaConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ ) def a ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' ) def a ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' ) def a ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not output attentions' ) def a ( self ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' ) def a ( self ): '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : str = model_class(snake_case__ ) _lowerCAmelCase : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase : int = [*signature.parameters.keys()] _lowerCAmelCase : Any = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def a ( self ): '''simple docstring''' def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ): _lowerCAmelCase : Dict = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): _lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) _lowerCAmelCase : List[str] = outputs.hidden_states _lowerCAmelCase : List[str] = 5 self.assertEqual(len(snake_case__ ) , snake_case__ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. _lowerCAmelCase : List[Any] = 2 for i in range(len(snake_case__ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : Optional[int] = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase : Any = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) @slow def a ( self ): '''simple docstring''' for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : Dict = MobileViTVaModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def lowercase (): """simple docstring""" _lowerCAmelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def a ( self ): '''simple docstring''' return ( MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ) if is_vision_available() else None ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to( snake_case__ ) _lowerCAmelCase : str = self.default_image_processor _lowerCAmelCase : Any = prepare_img() _lowerCAmelCase : Optional[int] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # forward pass with 
torch.no_grad(): _lowerCAmelCase : Tuple = model(**snake_case__ ) # verify the logits _lowerCAmelCase : Optional[Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) _lowerCAmelCase : Tuple = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(snake_case__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _lowerCAmelCase : Any = model.to(snake_case__ ) _lowerCAmelCase : int = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _lowerCAmelCase : Optional[int] = prepare_img() _lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # forward pass with torch.no_grad(): _lowerCAmelCase : int = model(**snake_case__ ) _lowerCAmelCase : Dict = outputs.logits # verify the logits _lowerCAmelCase : str = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , snake_case__ ) _lowerCAmelCase : Any = torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1E-4 ) ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _lowerCAmelCase : List[Any] = model.to(snake_case__ ) _lowerCAmelCase : str = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _lowerCAmelCase : Tuple = prepare_img() _lowerCAmelCase : List[str] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # forward pass with torch.no_grad(): _lowerCAmelCase : Any = model(**snake_case__ ) _lowerCAmelCase : Optional[Any] = outputs.logits.detach().cpu() _lowerCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(50, 60)] ) _lowerCAmelCase : List[Any] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , snake_case__ ) _lowerCAmelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=snake_case__ ) _lowerCAmelCase : Tuple = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , snake_case__ )
630
1
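# A minimal sketch of the channel-rounding rule behind `make_divisible` as used by
# the tester above: round to the nearest multiple of `divisor`, never dropping
# below 90% of the input. The name `make_divisible_sketch` is hypothetical and the
# exact upstream helper may differ in details.
def make_divisible_sketch(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # guard against rounding down by more than 10%
        new_value += divisor
    return new_value

# With the tester defaults (width_multiplier=0.25): 512 * 0.25 = 128, already a multiple of 8.
assert make_divisible_sketch(512 * 0.25, divisor=8) == 128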
'''simple docstring''' from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class UpperCamelCase__ : """simple docstring""" pass
630
'''simple docstring''' import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' ) _lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('xlm-roberta-base' ) _lowerCAmelCase : Dict = 'The dog is cute and lives in the garden house' _lowerCAmelCase : List[str] = jnp.array([tokenizer.encode(snake_case__ )] ) _lowerCAmelCase : Optional[int] = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim _lowerCAmelCase : Tuple = jnp.array( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) _lowerCAmelCase : Union[str, Any] = model(snake_case__ )['last_hidden_state'] self.assertEqual(output.shape , snake_case__ ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] , snake_case__ , atol=1E-3 ) )
630
1
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self ): '''simple docstring''' self.test() def a ( self ): '''simple docstring''' _lowerCAmelCase : str = 0 _lowerCAmelCase : List[str] = False while not completed: if counter == 1: self.reset() _lowerCAmelCase : Dict = self.advance() if not self.does_advance(snake_case__ ): raise Exception( 'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.update(snake_case__ ) counter += 1 if counter > 1_0000: raise Exception('update() does not fulfill the constraint.' ) if self.remaining() != 0: raise Exception('Custom Constraint is not defined correctly.' ) @abstractmethod def a ( self ): '''simple docstring''' raise NotImplementedError( F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def a ( self , snake_case__ ): '''simple docstring''' raise NotImplementedError( F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def a ( self , snake_case__ ): '''simple docstring''' raise NotImplementedError( F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def a ( self ): '''simple docstring''' raise NotImplementedError( F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def a ( self ): '''simple docstring''' raise NotImplementedError( F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def a ( self , snake_case__=False ): '''simple docstring''' raise NotImplementedError( F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ ): '''simple docstring''' super(snake_case__ , self ).__init__() if not isinstance(snake_case__ , snake_case__ ) or len(snake_case__ ) == 0: raise ValueError(F'`token_ids` has to be a non-empty list, but is {token_ids}.' ) if any((not isinstance(snake_case__ , snake_case__ ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' 
) _lowerCAmelCase : List[str] = token_ids _lowerCAmelCase : Optional[Any] = len(self.token_ids ) _lowerCAmelCase : List[str] = -1 # the index of the currently fulfilled step _lowerCAmelCase : int = False def a ( self ): '''simple docstring''' if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def a ( self , snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): raise ValueError(F'`token_id` has to be an `int`, but is {token_id} of type {type(snake_case__ )}' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def a ( self , snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): raise ValueError(F'`token_id` has to be an `int`, but is {token_id} of type {type(snake_case__ )}' ) _lowerCAmelCase : Union[str, Any] = False _lowerCAmelCase : int = False _lowerCAmelCase : Optional[int] = False if self.does_advance(snake_case__ ): self.fulfilled_idx += 1 _lowerCAmelCase : str = True if self.fulfilled_idx == (self.seqlen - 1): _lowerCAmelCase : Optional[int] = True _lowerCAmelCase : Dict = completed else: # failed to make progress. _lowerCAmelCase : List[Any] = True self.reset() return stepped, completed, reset def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = False _lowerCAmelCase : Optional[int] = 0 def a ( self ): '''simple docstring''' return self.seqlen - (self.fulfilled_idx + 1) def a ( self , snake_case__=False ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = PhrasalConstraint(self.token_ids ) if stateful: _lowerCAmelCase : Dict = self.seqlen _lowerCAmelCase : Optional[int] = self.fulfilled_idx _lowerCAmelCase : List[Any] = self.completed return new_constraint class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=True ): '''simple docstring''' _lowerCAmelCase : Any = max([len(snake_case__ ) for one in nested_token_ids] ) _lowerCAmelCase : int = {} for token_ids in nested_token_ids: _lowerCAmelCase : Tuple = root for tidx, token_id in enumerate(snake_case__ ): if token_id not in level: _lowerCAmelCase : Dict = {} _lowerCAmelCase : Tuple = level[token_id] if no_subsets and self.has_subsets(snake_case__ , snake_case__ ): raise ValueError( 'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is' F' {nested_token_ids}.' ) _lowerCAmelCase : Optional[int] = root def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[str] = self.trie for current_token in current_seq: _lowerCAmelCase : List[Any] = start[current_token] _lowerCAmelCase : Optional[int] = list(start.keys() ) return next_tokens def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = self.next_tokens(snake_case__ ) return len(snake_case__ ) == 0 def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = list(root.values() ) if len(snake_case__ ) == 0: return 1 else: return sum([self.count_leaves(snake_case__ ) for nn in next_nodes] ) def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.count_leaves(snake_case__ ) return len(snake_case__ ) != leaf_count class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ ): '''simple docstring''' super(snake_case__ , self ).__init__() if not isinstance(snake_case__ , snake_case__ ) or len(snake_case__ ) == 0: raise ValueError(F'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' 
) if any(not isinstance(snake_case__ , snake_case__ ) for token_ids in nested_token_ids ): raise ValueError(F'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' ) if any( any((not isinstance(snake_case__ , snake_case__ ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' ) _lowerCAmelCase : Dict = DisjunctiveTrie(snake_case__ ) _lowerCAmelCase : List[str] = nested_token_ids _lowerCAmelCase : int = self.trie.max_height _lowerCAmelCase : str = [] _lowerCAmelCase : str = False def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = self.trie.next_tokens(self.current_seq ) if len(snake_case__ ) == 0: return None else: return token_list def a ( self , snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): raise ValueError(F'`token_id` is supposed to be type `int`, but is {token_id} of type {type(snake_case__ )}' ) _lowerCAmelCase : Optional[Any] = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def a ( self , snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): raise ValueError(F'`token_id` is supposed to be type `int`, but is {token_id} of type {type(snake_case__ )}' ) _lowerCAmelCase : str = False _lowerCAmelCase : str = False _lowerCAmelCase : Tuple = False if self.does_advance(snake_case__ ): self.current_seq.append(snake_case__ ) _lowerCAmelCase : List[str] = True else: _lowerCAmelCase : Optional[int] = True self.reset() _lowerCAmelCase : List[str] = self.trie.reached_leaf(self.current_seq ) _lowerCAmelCase : Optional[Any] = completed return stepped, completed, reset def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = False _lowerCAmelCase : int = [] def a ( self ): '''simple docstring''' if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def a ( self , snake_case__=False ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = DisjunctiveConstraint(self.token_ids ) if stateful: _lowerCAmelCase : List[str] = self.seqlen _lowerCAmelCase : Optional[int] = self.current_seq _lowerCAmelCase : Tuple = self.completed return new_constraint class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = constraints # max # of steps required to fulfill a given constraint _lowerCAmelCase : List[str] = max([c.seqlen for c in constraints] ) _lowerCAmelCase : int = len(snake_case__ ) _lowerCAmelCase : Dict = False self.init_state() def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = [] _lowerCAmelCase : str = None _lowerCAmelCase : Optional[Any] = [constraint.copy(stateful=snake_case__ ) for constraint in self.constraints] def a ( self ): '''simple docstring''' _lowerCAmelCase : int = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" _lowerCAmelCase : Any = constraint.advance() if isinstance(snake_case__ , snake_case__ ): token_list.append(snake_case__ ) elif 
isinstance(snake_case__ , snake_case__ ): token_list.extend(snake_case__ ) else: _lowerCAmelCase : Optional[Any] = self.inprogress_constraint.advance() if isinstance(snake_case__ , snake_case__ ): token_list.append(snake_case__ ) elif isinstance(snake_case__ , snake_case__ ): token_list.extend(snake_case__ ) if len(snake_case__ ) == 0: return None else: return token_list def a ( self , snake_case__ ): '''simple docstring''' self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.add(snake_case__ ) # the entire list of constraints are fulfilled if self.completed: break def a ( self , snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): raise ValueError(F'`token_id` should be an `int`, but is `{token_id}`.' ) _lowerCAmelCase , _lowerCAmelCase : List[Any] = False, False if self.completed: _lowerCAmelCase : List[Any] = True _lowerCAmelCase : List[Any] = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = self.inprogress_constraint.update(snake_case__ ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=snake_case__ ) ) _lowerCAmelCase : int = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) _lowerCAmelCase : Optional[int] = None if len(self.pending_constraints ) == 0: # we're done! _lowerCAmelCase : Tuple = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(snake_case__ ): _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = pending_constraint.update(snake_case__ ) if not stepped: raise Exception( '`constraint.update(token_id)` is not yielding incremental progress, ' 'even though `constraint.does_advance(token_id)` is true.' ) if complete: self.complete_constraints.append(snake_case__ ) _lowerCAmelCase : str = None if not complete and stepped: _lowerCAmelCase : int = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". _lowerCAmelCase : Any = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. _lowerCAmelCase : List[str] = True break # prevent accidentally stepping through multiple constraints with just one token. 
return complete, stepped def a ( self , snake_case__=True ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = ConstraintListState(self.constraints ) # we never actually modify the self.constraints objects # throughout this process, so they are still at their initialization state. if stateful: _lowerCAmelCase : List[str] = [ constraint.copy(stateful=snake_case__ ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: _lowerCAmelCase : str = self.inprogress_constraint.copy(stateful=snake_case__ ) _lowerCAmelCase : List[str] = [constraint.copy() for constraint in self.pending_constraints] return new_state
630
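# A minimal standalone sketch of the prefix-trie idea behind DisjunctiveTrie above:
# nested token-id lists share prefixes, and next_tokens(prefix) returns the ids
# that can legally extend a partial sequence. The names here are hypothetical.
def build_trie(nested_token_ids):
    root = {}
    for token_ids in nested_token_ids:
        level = root
        for token_id in token_ids:
            level = level.setdefault(token_id, {})
    return root

def next_tokens(trie, prefix):
    level = trie
    for token_id in prefix:
        level = level[token_id]
    return list(level.keys())

trie = build_trie([[1, 2, 3], [1, 2, 4], [1, 5]])
assert sorted(next_tokens(trie, [1])) == [2, 5]
assert next_tokens(trie, [1, 2, 3]) == []  # reached a leaf: the constraint is satisfied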
'''simple docstring''' def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Dict = len(_A ) while cur > 1: # Find the maximum number in arr _lowerCAmelCase : int = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi _lowerCAmelCase : Dict = arr[mi::-1] + arr[mi + 1 : len(_A )] # Reverse whole list _lowerCAmelCase : Optional[int] = arr[cur - 1 :: -1] + arr[cur : len(_A )] cur -= 1 return arr if __name__ == "__main__": lowerCAmelCase : Tuple = input("""Enter numbers separated by a comma:\n""").strip() lowerCAmelCase : Tuple = [int(item) for item in user_input.split(""",""")] print(pancake_sort(unsorted))
630
1
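# Worked example of the pancake-sort step above on [3, 1, 2]:
#   cur=3: max of arr[0:3] is 3 at index 0 -> flip prefix [0..0] (a no-op),
#          then flip the whole prefix [0..2] -> [2, 1, 3]
#   cur=2: max of arr[0:2] is 2 at index 0 -> flip [0..0], then flip [0..1] -> [1, 2, 3]
# A compact sketch of the same scheme that also counts the flips performed
# (two per round, including no-op flips); names are hypothetical.
def pancake_sort_with_flips(arr):
    arr, flips = list(arr), 0
    for cur in range(len(arr), 1, -1):
        mi = arr.index(max(arr[:cur]))
        arr = arr[mi::-1] + arr[mi + 1 :]      # bring the max to the front
        arr = arr[cur - 1 :: -1] + arr[cur:]   # flip it into its final slot
        flips += 2
    return arr, flips

assert pancake_sort_with_flips([3, 1, 2])[0] == [1, 2, 3]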
'''simple docstring''' import argparse import collections import json import os import re import string import sys import numpy as np lowerCAmelCase : Optional[Any] = re.compile(r"""\b(a|an|the)\b""", re.UNICODE) lowerCAmelCase : int = None def lowercase (): """simple docstring""" _lowerCAmelCase : Tuple = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' ) parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' ) parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' ) parser.add_argument( '--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' ) parser.add_argument( '--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' ) parser.add_argument( '--na-prob-thresh' , '-t' , type=_A , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , ) parser.add_argument( '--out-image-dir' , '-p' , metavar='out_images' , default=_A , help='Save precision-recall curves to directory.' ) parser.add_argument('--verbose' , '-v' , action='store_true' ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Union[str, Any] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: _lowerCAmelCase : Dict = bool(qa['answers']['text'] ) return qid_to_has_ans def lowercase (_A ): """simple docstring""" def remove_articles(_A ): return ARTICLES_REGEX.sub(' ' , _A ) def white_space_fix(_A ): return " ".join(text.split() ) def remove_punc(_A ): _lowerCAmelCase : Optional[int] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_A ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_A ) ) ) ) def lowercase (_A ): """simple docstring""" if not s: return [] return normalize_answer(_A ).split() def lowercase (_A , _A ): """simple docstring""" return int(normalize_answer(_A ) == normalize_answer(_A ) ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Tuple = get_tokens(_A ) _lowerCAmelCase : Optional[Any] = get_tokens(_A ) _lowerCAmelCase : Tuple = collections.Counter(_A ) & collections.Counter(_A ) _lowerCAmelCase : Any = sum(common.values() ) if len(_A ) == 0 or len(_A ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 _lowerCAmelCase : Tuple = 1.0 * num_same / len(_A ) _lowerCAmelCase : Dict = 1.0 * num_same / len(_A ) _lowerCAmelCase : Optional[Any] = (2 * precision * recall) / (precision + recall) return fa def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Optional[Any] = {} _lowerCAmelCase : int = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: _lowerCAmelCase : str = qa['id'] _lowerCAmelCase : Optional[Any] = [t for t in qa['answers']['text'] if normalize_answer(_A )] if not gold_answers: # For unanswerable questions, only correct answer is empty string _lowerCAmelCase : Tuple = [''] if qid not in preds: print(f'Missing prediction for {qid}' ) continue _lowerCAmelCase : Optional[int] = preds[qid] # Take max over all gold answers _lowerCAmelCase : List[str] = max(compute_exact(_A , _A ) for a in gold_answers ) _lowerCAmelCase : int = max(compute_fa(_A , _A ) for a in gold_answers ) return exact_scores, fa_scores def lowercase (_A , _A , _A , _A ): """simple docstring""" 
_lowerCAmelCase : Tuple = {} for qid, s in scores.items(): _lowerCAmelCase : str = na_probs[qid] > na_prob_thresh if pred_na: _lowerCAmelCase : List[str] = float(not qid_to_has_ans[qid] ) else: _lowerCAmelCase : Dict = s return new_scores def lowercase (_A , _A , _A=None ): """simple docstring""" if not qid_list: _lowerCAmelCase : Tuple = len(_A ) return collections.OrderedDict( [ ('exact', 100.0 * sum(exact_scores.values() ) / total), ('f1', 100.0 * sum(fa_scores.values() ) / total), ('total', total), ] ) else: _lowerCAmelCase : List[Any] = len(_A ) return collections.OrderedDict( [ ('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ('total', total), ] ) def lowercase (_A , _A , _A ): """simple docstring""" for k in new_eval: _lowerCAmelCase : Optional[int] = new_eval[k] def lowercase (_A , _A , _A , _A ): """simple docstring""" plt.step(_A , _A , color='b' , alpha=0.2 , where='post' ) plt.fill_between(_A , _A , step='post' , alpha=0.2 , color='b' ) plt.xlabel('Recall' ) plt.ylabel('Precision' ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(_A ) plt.savefig(_A ) plt.clf() def lowercase (_A , _A , _A , _A , _A=None , _A=None ): """simple docstring""" _lowerCAmelCase : str = sorted(_A , key=lambda _A : na_probs[k] ) _lowerCAmelCase : List[str] = 0.0 _lowerCAmelCase : Union[str, Any] = 1.0 _lowerCAmelCase : List[str] = 0.0 _lowerCAmelCase : Union[str, Any] = [1.0] _lowerCAmelCase : Any = [0.0] _lowerCAmelCase : List[str] = 0.0 for i, qid in enumerate(_A ): if qid_to_has_ans[qid]: true_pos += scores[qid] _lowerCAmelCase : int = true_pos / float(i + 1 ) _lowerCAmelCase : List[str] = true_pos / float(_A ) if i == len(_A ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(_A ) recalls.append(_A ) if out_image: plot_pr_curve(_A , _A , _A , _A ) return {"ap": 100.0 * avg_prec} def lowercase (_A , _A , _A , _A , _A , _A ): """simple docstring""" if out_image_dir and not os.path.exists(_A ): os.makedirs(_A ) _lowerCAmelCase : int = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return _lowerCAmelCase : Optional[int] = make_precision_recall_eval( _A , _A , _A , _A , out_image=os.path.join(_A , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , ) _lowerCAmelCase : Any = make_precision_recall_eval( _A , _A , _A , _A , out_image=os.path.join(_A , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , ) _lowerCAmelCase : List[Any] = {k: float(_A ) for k, v in qid_to_has_ans.items()} _lowerCAmelCase : Any = make_precision_recall_eval( _A , _A , _A , _A , out_image=os.path.join(_A , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)' , ) merge_eval(_A , _A , 'pr_exact' ) merge_eval(_A , _A , 'pr_f1' ) merge_eval(_A , _A , 'pr_oracle' ) def lowercase (_A , _A , _A , _A ): """simple docstring""" if not qid_list: return _lowerCAmelCase : List[Any] = [na_probs[k] for k in qid_list] _lowerCAmelCase : Dict = np.ones_like(_A ) / float(len(_A ) ) plt.hist(_A , weights=_A , bins=2_0 , range=(0.0, 1.0) ) plt.xlabel('Model probability of no-answer' ) plt.ylabel('Proportion of dataset' ) plt.title(f'Histogram of no-answer probability: {name}' ) plt.savefig(os.path.join(_A , f'na_prob_hist_{name}.png' ) ) plt.clf() def lowercase (_A , _A , _A , _A ): """simple docstring""" _lowerCAmelCase : str = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) _lowerCAmelCase : Tuple = num_no_ans _lowerCAmelCase : Union[str, Any] = cur_score _lowerCAmelCase : str = 0.0 _lowerCAmelCase : Tuple = sorted(_A , key=lambda _A : na_probs[k] ) for i, qid in enumerate(_A ): if qid not in scores: continue if qid_to_has_ans[qid]: _lowerCAmelCase : Tuple = scores[qid] else: if preds[qid]: _lowerCAmelCase : Dict = -1 else: _lowerCAmelCase : Tuple = 0 cur_score += diff if cur_score > best_score: _lowerCAmelCase : Any = cur_score _lowerCAmelCase : Union[str, Any] = na_probs[qid] return 100.0 * best_score / len(_A ), best_thresh def lowercase (_A , _A , _A , _A , _A , _A ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase : Tuple = find_best_thresh(_A , _A , _A , _A ) _lowerCAmelCase , _lowerCAmelCase : int = find_best_thresh(_A , _A , _A , _A ) _lowerCAmelCase : Optional[Any] = best_exact _lowerCAmelCase : Any = exact_thresh _lowerCAmelCase : Any = best_fa _lowerCAmelCase : Any = fa_thresh def lowercase (): """simple docstring""" with open(OPTS.data_file ) as f: _lowerCAmelCase : Tuple = json.load(_A ) _lowerCAmelCase : List[str] = dataset_json['data'] with open(OPTS.pred_file ) as f: _lowerCAmelCase : str = json.load(_A ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: _lowerCAmelCase : Tuple = json.load(_A ) else: _lowerCAmelCase : Any = {k: 0.0 for k in preds} _lowerCAmelCase : Optional[int] = make_qid_to_has_ans(_A ) # maps qid to True/False _lowerCAmelCase : List[Any] = [k for k, v in qid_to_has_ans.items() if v] _lowerCAmelCase : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if not v] _lowerCAmelCase , _lowerCAmelCase : Tuple = get_raw_scores(_A , _A ) _lowerCAmelCase : List[Any] = apply_no_ans_threshold(_A , _A , _A , OPTS.na_prob_thresh ) _lowerCAmelCase : Union[str, Any] = apply_no_ans_threshold(_A , _A , _A , OPTS.na_prob_thresh ) _lowerCAmelCase : Any = make_eval_dict(_A , _A ) if has_ans_qids: _lowerCAmelCase : Optional[int] = make_eval_dict(_A , _A , qid_list=_A ) merge_eval(_A , _A , 'HasAns' ) if no_ans_qids: _lowerCAmelCase : Optional[Any] = make_eval_dict(_A , _A , qid_list=_A ) merge_eval(_A , _A , 'NoAns' ) if OPTS.na_prob_file: find_all_best_thresh(_A , _A , _A , _A , _A , _A ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(_A , _A , _A , _A , _A , OPTS.out_image_dir ) histogram_na_prob(_A , _A , OPTS.out_image_dir , 'hasAns' ) histogram_na_prob(_A , _A , OPTS.out_image_dir , 'noAns' ) if OPTS.out_file: with open(OPTS.out_file , 'w' ) as f: json.dump(_A , _A ) else: print(json.dumps(_A , indent=2 ) ) if __name__ == "__main__": lowerCAmelCase : Tuple = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("""Agg""") import matplotlib.pyplot as plt main()
630
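# Toy walkthrough of the token-level F1 computed by the evaluation script above,
# assuming whitespace tokenization after normalization (inputs are hypothetical):
from collections import Counter

gold_toks = "the cat sat".split()     # normalized gold answer
pred_toks = "the cat slept".split()   # normalized prediction
num_same = sum((Counter(gold_toks) & Counter(pred_toks)).values())  # 2 shared tokens
precision = num_same / len(pred_toks)  # 2/3
recall = num_same / len(gold_toks)     # 2/3
f1 = 2 * precision * recall / (precision + recall)
assert round(f1, 4) == round(2 / 3, 4)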
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : str = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "gptj" __magic_name__ = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , snake_case__=5_0400 , snake_case__=2048 , snake_case__=4096 , snake_case__=28 , snake_case__=16 , snake_case__=64 , snake_case__=None , snake_case__="gelu_new" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1E-5 , snake_case__=0.02 , snake_case__=True , snake_case__=5_0256 , snake_case__=5_0256 , snake_case__=False , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : int = vocab_size _lowerCAmelCase : Optional[int] = n_positions _lowerCAmelCase : Optional[int] = n_embd _lowerCAmelCase : Optional[int] = n_layer _lowerCAmelCase : str = n_head _lowerCAmelCase : Tuple = n_inner _lowerCAmelCase : Tuple = rotary_dim _lowerCAmelCase : Optional[int] = activation_function _lowerCAmelCase : Any = resid_pdrop _lowerCAmelCase : List[str] = embd_pdrop _lowerCAmelCase : int = attn_pdrop _lowerCAmelCase : Any = layer_norm_epsilon _lowerCAmelCase : Optional[int] = initializer_range _lowerCAmelCase : List[str] = use_cache _lowerCAmelCase : Dict = bos_token_id _lowerCAmelCase : Any = eos_token_id super().__init__( bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = "default" , snake_case__ = None , snake_case__ = False , ): '''simple docstring''' super().__init__(snake_case__ , task=snake_case__ , patching_specs=snake_case__ , use_past=snake_case__ ) if not getattr(self._config , 'pad_token_id' , snake_case__ ): # TODO: how to do that better? 
_lowerCAmelCase : Any = 0 @property def a ( self ): '''simple docstring''' _lowerCAmelCase : str = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(snake_case__ , direction='inputs' ) _lowerCAmelCase : int = {0: 'batch', 1: 'past_sequence + sequence'} else: _lowerCAmelCase : int = {0: 'batch', 1: 'sequence'} return common_inputs @property def a ( self ): '''simple docstring''' return self._config.n_layer @property def a ( self ): '''simple docstring''' return self._config.n_head def a ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = super(snake_case__ , self ).generate_dummy_inputs( snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ ) # We need to order the input in the way they appears in the forward() _lowerCAmelCase : Any = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = common_inputs['input_ids'].shape # Not using the same length for past_key_values _lowerCAmelCase : Any = seqlen + 2 _lowerCAmelCase : Optional[int] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _lowerCAmelCase : Tuple = [ (torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers ) ] _lowerCAmelCase : Tuple = common_inputs['attention_mask'] if self.use_past: _lowerCAmelCase : Any = ordered_inputs['attention_mask'].dtype _lowerCAmelCase : Union[str, Any] = torch.cat( [ordered_inputs['attention_mask'], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 ) return ordered_inputs @property def a ( self ): '''simple docstring''' return 13
630
1
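# The ONNX config above builds dummy past_key_values of shape
# (batch, n_head, past_sequence_length, n_embd // n_head). A quick shape check
# for the GPT-J defaults declared in the config (n_embd=4096, n_head=16, n_layer=28):
n_embd, n_head, n_layer = 4096, 16, 28
batch, seqlen = 2, 8
past_len = seqlen + 2            # mirrors the `seqlen + 2` used above
head_dim = n_embd // n_head      # 256
assert (batch, n_head, past_len, head_dim) == (2, 16, 10, 256)
# One (key, value) pair of this shape is created per layer, i.e. 28 pairs in total.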
'''simple docstring''' import re def lowercase (_A ): """simple docstring""" if len(re.findall('[ATCG]' , _A ) ) != len(_A ): raise ValueError('Invalid Strand' ) return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) ) if __name__ == "__main__": import doctest doctest.testmod()
630
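# str.maketrans/str.translate map each base to its complement in one pass, as above.
# A reverse complement (the complement read in the opposite direction) is the same
# idea plus a slice; this sketch and its name are illustrative, not part of the module.
def reverse_complement(dna: str) -> str:
    return dna.translate(str.maketrans("ATCG", "TAGC"))[::-1]

assert reverse_complement("ATGC") == "GCAT"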
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : Any = { """configuration_x_clip""": [ """XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XCLIPConfig""", """XCLIPTextConfig""", """XCLIPVisionConfig""", ], """processing_x_clip""": ["""XCLIPProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] = [ """XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """XCLIPModel""", """XCLIPPreTrainedModel""", """XCLIPTextModel""", """XCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, ) from .processing_x_clip import XCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) else: import sys lowerCAmelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
630
1
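# A minimal sketch of the lazy-import pattern used by `_LazyModule` above: attribute
# access triggers the real import, so the package stays cheap to import until a
# symbol is actually used. All names here are hypothetical simplifications.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # maps attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

# e.g. LazyModuleSketch("pkg", {"json": ["loads", "dumps"]}).loads('{"a": 1}')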
'''simple docstring''' import fire from utils import calculate_rouge, save_json def lowercase (_A , _A , _A=None , **_A ): """simple docstring""" _lowerCAmelCase : Tuple = [x.strip() for x in open(_A ).readlines()] _lowerCAmelCase : List[Any] = [x.strip() for x in open(_A ).readlines()][: len(_A )] _lowerCAmelCase : Optional[Any] = calculate_rouge(_A , _A , **_A ) if save_path is not None: save_json(_A , _A , indent=_A ) return metrics # these print nicely if __name__ == "__main__": fire.Fire(calculate_rouge_path)
630
'''simple docstring''' import math from datetime import datetime, timedelta def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Optional[Any] = year % 1_9 _lowerCAmelCase : Any = year % 4 _lowerCAmelCase : Optional[int] = year % 7 _lowerCAmelCase : int = math.floor(year / 1_0_0 ) _lowerCAmelCase : Dict = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 ) _lowerCAmelCase : Optional[Any] = leap_day_inhibits / 4 _lowerCAmelCase : Dict = ( 1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number ) % 3_0 _lowerCAmelCase : List[Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7 # days to be added to March 21 _lowerCAmelCase : Dict = (1_9 * metonic_cycle + secular_moon_shift) % 3_0 # PHM -> Paschal Full Moon _lowerCAmelCase : Union[str, Any] = ( 2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point ) % 7 if days_to_add == 2_9 and days_from_phm_to_sunday == 6: return datetime(_A , 4 , 1_9 ) elif days_to_add == 2_8 and days_from_phm_to_sunday == 6: return datetime(_A , 4 , 1_8 ) else: return datetime(_A , 3 , 2_2 ) + timedelta( days=int(days_to_add + days_from_phm_to_sunday ) ) if __name__ == "__main__": for year in (19_94, 20_00, 20_10, 20_21, 20_23): lowerCAmelCase : List[str] = """will be""" if year > datetime.now().year else """was""" print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
630
1
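# Worked example of the Gauss computation above for year = 2024:
#   metonic_cycle             = 2024 % 19            -> 10
#   julian_leap_year          = 2024 % 4             -> 0
#   non_leap_year             = 2024 % 7             -> 1
#   leap_day_inhibits         = floor(2024 / 100)    -> 20
#   lunar_orbit_correction    = floor((13 + 8*20)/25) -> 6
#   leap_day_reinstall_number = 20 / 4               -> 5.0
#   secular_moon_shift        = (15 - 6 + 20 - 5) % 30 -> 24
#   century_starting_point    = (4 + 20 - 5) % 7     -> 5
#   days_to_add               = (19*10 + 24) % 30    -> 4
#   days_from_phm_to_sunday   = (0 + 4 + 24 + 5) % 7 -> 5
# Neither special case fires, so Easter = March 22 + (4 + 5) days = March 31, 2024,
# which matches the actual Easter Sunday that year.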
'''simple docstring''' import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor lowerCAmelCase : List[Any] = logging.get_logger(__name__) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , *snake_case__ , **snake_case__ ): '''simple docstring''' warnings.warn( 'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ChineseCLIPImageProcessor instead.' , snake_case__ , ) super().__init__(*snake_case__ , **snake_case__ )
630
'''simple docstring''' import unittest from knapsack import greedy_knapsack as kp class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [10, 20, 30, 40, 50, 60] _lowerCAmelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12] _lowerCAmelCase : Dict = 100 self.assertEqual(kp.calc_profit(snake_case__ , snake_case__ , snake_case__ ) , 210 ) def a ( self ): '''simple docstring''' self.assertRaisesRegex(snake_case__ , 'max_weight must greater than zero.' ) def a ( self ): '''simple docstring''' self.assertRaisesRegex(snake_case__ , 'Weight can not be negative.' ) def a ( self ): '''simple docstring''' self.assertRaisesRegex(snake_case__ , 'Profit can not be negative.' ) def a ( self ): '''simple docstring''' self.assertRaisesRegex(snake_case__ , 'max_weight must greater than zero.' ) def a ( self ): '''simple docstring''' self.assertRaisesRegex( snake_case__ , 'The length of profit and weight must be same.' ) if __name__ == "__main__": unittest.main()
630
1
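# The `kp.calc_profit` under test is not included in this record. A minimal greedy
# fractional-knapsack sketch consistent with the asserted result (210 at
# max_weight=100); the name and exact behavior are hypothetical.
def calc_profit_sketch(profit, weight, max_weight):
    # take items in decreasing profit/weight ratio, splitting the last one if needed
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        if w <= capacity:
            total, capacity = total + p, capacity - w
        else:
            total += p * capacity / w
            break
    return total

# All six items fit (total weight 2+4+...+12 = 42 <= 100), so profit = 10+...+60 = 210.
assert calc_profit_sketch([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210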
'''simple docstring''' from __future__ import annotations def lowercase (_A ): """simple docstring""" _lowerCAmelCase : List[str] = 2 _lowerCAmelCase : List[Any] = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(_A ) if n > 1: factors.append(_A ) return factors if __name__ == "__main__": import doctest doctest.testmod()
630
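# Sanity check for the trial-division factorization above: factors come back in
# non-decreasing order and multiply back to the input. A self-contained sketch
# (hypothetical name) of the same loop:
from math import prod

def prime_factors_sketch(n):
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors

assert prime_factors_sketch(360) == [2, 2, 2, 3, 3, 5]
assert prod(prime_factors_sketch(360)) == 360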
'''simple docstring''' def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Optional[int] = (boundary[1] - boundary[0]) / steps _lowerCAmelCase : Any = boundary[0] _lowerCAmelCase : List[str] = boundary[1] _lowerCAmelCase : Tuple = make_points(_A , _A , _A ) _lowerCAmelCase : Tuple = 0.0 y += (h / 2.0) * f(_A ) for i in x_i: # print(i) y += h * f(_A ) y += (h / 2.0) * f(_A ) return y def lowercase (_A , _A , _A ): """simple docstring""" _lowerCAmelCase : Tuple = a + h while x < (b - h): yield x _lowerCAmelCase : Any = x + h def lowercase (_A ): # enter your function here """simple docstring""" _lowerCAmelCase : int = (x - 0) * (x - 0) return y def lowercase (): """simple docstring""" _lowerCAmelCase : Optional[Any] = 0.0 # Lower bound of integration _lowerCAmelCase : Dict = 1.0 # Upper bound of integration _lowerCAmelCase : Optional[Any] = 10.0 # define number of steps or resolution _lowerCAmelCase : Optional[int] = [a, b] # define boundary of integration _lowerCAmelCase : List[Any] = method_a(_A , _A ) print(f'y = {y}' ) if __name__ == "__main__": main()
630
1
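# For f(x) = x^2 on [0, 1] the exact integral is 1/3, and the composite trapezoidal
# error shrinks like h^2: ten times as many steps cuts the error roughly 100x.
# A self-contained sketch of the same rule (hypothetical name):
def trapezoid_sketch(f, a, b, steps):
    h = (b - a) / steps
    total = (f(a) + f(b)) / 2.0
    total += sum(f(a + i * h) for i in range(1, steps))
    return total * h

err_10 = abs(trapezoid_sketch(lambda x: x * x, 0.0, 1.0, 10) - 1 / 3)    # ~1.7e-3
err_100 = abs(trapezoid_sketch(lambda x: x * x, 0.0, 1.0, 100) - 1 / 3)  # ~1.7e-5
assert err_10 / err_100 > 90  # close to the theoretical factor of 100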
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = LDMTextToImagePipeline __magic_name__ = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "negative_prompt_embeds", "cross_attention_kwargs", "prompt_embeds", } __magic_name__ = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "callback", "callback_steps", } __magic_name__ = TEXT_TO_IMAGE_BATCH_PARAMS __magic_name__ = False def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) _lowerCAmelCase : Optional[int] = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , ) torch.manual_seed(0 ) _lowerCAmelCase : Optional[Any] = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , ) torch.manual_seed(0 ) _lowerCAmelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _lowerCAmelCase : Union[str, Any] = CLIPTextModel(snake_case__ ) _lowerCAmelCase : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _lowerCAmelCase : Union[str, Any] = { 'unet': unet, 'scheduler': scheduler, 'vqvae': vae, 'bert': text_encoder, 'tokenizer': tokenizer, } return components def a ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' if str(snake_case__ ).startswith('mps' ): _lowerCAmelCase : List[str] = torch.manual_seed(snake_case__ ) else: _lowerCAmelCase : Any = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) _lowerCAmelCase : List[Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase : Any = self.get_dummy_components() _lowerCAmelCase : Optional[int] = LDMTextToImagePipeline(**snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Any = self.get_dummy_inputs(snake_case__ ) _lowerCAmelCase : str = pipe(**snake_case__ ).images _lowerCAmelCase : str = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) _lowerCAmelCase : Any = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow 
@require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self , snake_case__ , snake_case__=torch.floataa , snake_case__=0 ): '''simple docstring''' _lowerCAmelCase : int = torch.manual_seed(snake_case__ ) _lowerCAmelCase : Optional[int] = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 32, 32) ) _lowerCAmelCase : Dict = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ ) _lowerCAmelCase : Optional[Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Tuple = self.get_inputs(snake_case__ ) _lowerCAmelCase : Tuple = pipe(**snake_case__ ).images _lowerCAmelCase : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) _lowerCAmelCase : int = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] ) _lowerCAmelCase : Optional[int] = np.abs(expected_slice - image_slice ).max() assert max_diff < 1E-3 @nightly @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self , snake_case__ , snake_case__=torch.floataa , snake_case__=0 ): '''simple docstring''' _lowerCAmelCase : List[str] = torch.manual_seed(snake_case__ ) _lowerCAmelCase : Any = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 32, 32) ) _lowerCAmelCase : Optional[Any] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ ) _lowerCAmelCase : int = { 'prompt': 'A painting of a squirrel eating a burger', 'latents': latents, 'generator': generator, 'num_inference_steps': 50, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Optional[int] = self.get_inputs(snake_case__ ) _lowerCAmelCase : Optional[Any] = pipe(**snake_case__ ).images[0] _lowerCAmelCase : int = load_numpy( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' ) _lowerCAmelCase : List[Any] = np.abs(expected_image - image ).max() assert max_diff < 1E-3
630
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) lowerCAmelCase : int = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys lowerCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
630
1
'''simple docstring''' def lowercase (_A ): """simple docstring""" stooge(_A , 0 , len(_A ) - 1 ) return arr def lowercase (_A , _A , _A ): """simple docstring""" if i >= h: return # If the first element is larger than the last, swap them if arr[i] > arr[h]: _lowerCAmelCase , _lowerCAmelCase : List[str] = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: _lowerCAmelCase : Union[str, Any] = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(_A , _A , (h - t) ) # Recursively sort last 2/3 elements stooge(_A , i + t , (_A) ) # Recursively sort first 2/3 elements stooge(_A , _A , (h - t) ) if __name__ == "__main__": lowerCAmelCase : Optional[int] = input("""Enter numbers separated by a comma:\n""").strip() lowerCAmelCase : Dict = [int(item) for item in user_input.split(""",""")] print(stooge_sort(unsorted))
630
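# Stooge sort's recursion T(n) = 3*T(2n/3) + O(1) solves to
# O(n^(log 3 / log 1.5)) ~ O(n^2.71), i.e. asymptotically slower than bubble sort.
# A self-contained sketch of the same scheme (hypothetical name), checked against sorted():
import random

def stooge_sketch(arr, i=0, h=None):
    if h is None:
        h = len(arr) - 1
    if i >= h:
        return arr
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        stooge_sketch(arr, i, h - t)   # first 2/3
        stooge_sketch(arr, i + t, h)   # last 2/3
        stooge_sketch(arr, i, h - t)   # first 2/3 again
    return arr

sample = [random.randint(0, 99) for _ in range(15)]
assert stooge_sketch(list(sample)) == sorted(sample)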
'''simple docstring''' from collections import Counter from timeit import timeit def lowercase (_A = "" , ): """simple docstring""" return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2 def lowercase (_A = "" ): """simple docstring""" if len(_A ) == 0: return True _lowerCAmelCase : Union[str, Any] = input_str.replace(' ' , '' ).lower() # character_freq_dict: Stores the frequency of every character in the input string _lowerCAmelCase : dict[str, int] = {} for character in lower_case_input_str: _lowerCAmelCase : Union[str, Any] = character_freq_dict.get(_A , 0 ) + 1 _lowerCAmelCase : List[Any] = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def lowercase (_A = "" ): """simple docstring""" print('\nFor string = ' , _A , ':' ) print( '> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_A ) , '\ttime =' , timeit( 'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , ) print( '> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_A ) , '\ttime =' , timeit( 'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , ) if __name__ == "__main__": lowerCAmelCase : Tuple = input( """Enter string to determine if it can be rearranged as a palindrome or not: """ ).strip() benchmark(check_str) lowerCAmelCase : Optional[Any] = can_string_be_rearranged_as_palindrome_counter(check_str) print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
630
1
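# The Counter one-liner above counts characters with an odd frequency; a string can
# be rearranged into a palindrome iff at most one such character exists. A quick
# demonstration of that counting step (hypothetical helper name):
from collections import Counter

def odd_char_count(s: str) -> int:
    return sum(c % 2 for c in Counter(s.replace(" ", "").lower()).values())

assert odd_char_count("aab") == 1   # rearrangeable, e.g. "aba"
assert odd_char_count("abc") == 3   # not rearrangeable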
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : Optional[Any] = { """configuration_bigbird_pegasus""": [ """BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BigBirdPegasusConfig""", """BigBirdPegasusOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Any = [ """BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""", """BigBirdPegasusForCausalLM""", """BigBirdPegasusForConditionalGeneration""", """BigBirdPegasusForQuestionAnswering""", """BigBirdPegasusForSequenceClassification""", """BigBirdPegasusModel""", """BigBirdPegasusPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase : str = logging.get_logger(__name__) lowerCAmelCase : int = { """facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""", } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "data2vec-text" def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ ) _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Tuple = hidden_size _lowerCAmelCase : Dict = num_hidden_layers _lowerCAmelCase : int = num_attention_heads _lowerCAmelCase : str = hidden_act _lowerCAmelCase : Any = intermediate_size _lowerCAmelCase : Any = hidden_dropout_prob _lowerCAmelCase : Optional[int] = attention_probs_dropout_prob _lowerCAmelCase : str = max_position_embeddings _lowerCAmelCase : Any = type_vocab_size _lowerCAmelCase : int = initializer_range _lowerCAmelCase : List[str] = layer_norm_eps _lowerCAmelCase : List[Any] = position_embedding_type _lowerCAmelCase : str = use_cache _lowerCAmelCase : Union[str, Any] = classifier_dropout class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" @property def a ( self ): '''simple docstring''' if self.task == "multiple-choice": _lowerCAmelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _lowerCAmelCase : List[Any] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
'''simple docstring'''


def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares
    of the first ``n`` natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
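# Added worked check: for n = 10 the sum of squares is 385 and the square of
# the sum is 55 ** 2 = 3025, so solution(10) should be 3025 - 385 = 2640.
assert solution(10) == 2640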
'''simple docstring''' import pytest import datasets # Import fixture modules as plugins lowerCAmelCase : List[str] = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""] def lowercase (_A , _A ): """simple docstring""" for item in items: if any(marker in item.keywords for marker in ['integration', 'unit'] ): continue item.add_marker(pytest.mark.unit ) def lowercase (_A ): """simple docstring""" config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' ) @pytest.fixture(autouse=_A ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : str = tmp_path_factory.getbasetemp() / 'cache' _lowerCAmelCase : Dict = test_hf_cache_home / 'datasets' _lowerCAmelCase : List[Any] = test_hf_cache_home / 'metrics' _lowerCAmelCase : List[Any] = test_hf_cache_home / 'modules' monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_A ) ) monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_A ) ) monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_A ) ) _lowerCAmelCase : Dict = test_hf_datasets_cache / 'downloads' monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_A ) ) _lowerCAmelCase : Union[str, Any] = test_hf_datasets_cache / 'downloads' / 'extracted' monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_A ) ) @pytest.fixture(autouse=_A , scope='session' ) def lowercase (): """simple docstring""" datasets.disable_progress_bar() @pytest.fixture(autouse=_A ) def lowercase (_A ): """simple docstring""" monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _A ) @pytest.fixture def lowercase (_A ): """simple docstring""" monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _A )
'''simple docstring'''

from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup  # fixed: the package is bs4, not bsa

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
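# Note: the selectors above depend on Indeed's current markup and may break at
# any time; sending a browser-like User-Agent header is a common (hypothetical)
# mitigation, sketched here as comments:
# soup = BeautifulSoup(
#     requests.get(url + location, headers={"User-Agent": "Mozilla/5.0"}).content,
#     "html.parser",
# )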
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig lowerCAmelCase : str = logging.get_logger(__name__) # General docstring lowerCAmelCase : Optional[Any] = """RegNetConfig""" # Base docstring lowerCAmelCase : int = """facebook/regnet-y-040""" lowerCAmelCase : Optional[Any] = [1, 10_88, 7, 7] # Image classification docstring lowerCAmelCase : Any = """facebook/regnet-y-040""" lowerCAmelCase : Optional[Any] = """tabby, tabby cat""" lowerCAmelCase : Tuple = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = 3 , snake_case__ = 1 , snake_case__ = 1 , snake_case__ = "relu" , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb _lowerCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) _lowerCAmelCase : List[Any] = tf.keras.layers.ConvaD( filters=snake_case__ , kernel_size=snake_case__ , strides=snake_case__ , padding='VALID' , groups=snake_case__ , use_bias=snake_case__ , name='convolution' , ) _lowerCAmelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) _lowerCAmelCase : int = ACTaFN[activation] if activation is not None else tf.identity def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = self.convolution(self.padding(snake_case__ ) ) _lowerCAmelCase : Union[str, Any] = self.normalization(snake_case__ ) _lowerCAmelCase : int = self.activation(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : str = config.num_channels _lowerCAmelCase : List[Any] = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = shape_list(snake_case__ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) _lowerCAmelCase : List[Any] = tf.transpose(snake_case__ , perm=(0, 2, 3, 1) ) _lowerCAmelCase : Tuple = self.embedder(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = 2 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = tf.keras.layers.ConvaD( filters=snake_case__ , kernel_size=1 , strides=snake_case__ , use_bias=snake_case__ , name='convolution' ) _lowerCAmelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) def a ( self , snake_case__ , snake_case__ = False ): '''simple docstring''' return self.normalization(self.convolution(snake_case__ ) , training=snake_case__ ) class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='pooler' ) _lowerCAmelCase : str = [ tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='relu' , name='attention.0' ), tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='sigmoid' , name='attention.2' ), ] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = self.pooler(snake_case__ ) for layer_module in self.attention: _lowerCAmelCase : Tuple = layer_module(snake_case__ ) _lowerCAmelCase : Optional[Any] = hidden_state * pooled return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Optional[int] = in_channels != out_channels or stride != 1 _lowerCAmelCase : Union[str, Any] = max(1 , out_channels // config.groups_width ) _lowerCAmelCase : Optional[Any] = ( TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
_lowerCAmelCase : Any = [ TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='layer.1' ), TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='layer.2' ), ] _lowerCAmelCase : List[str] = ACTaFN[config.hidden_act] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = hidden_state for layer_module in self.layers: _lowerCAmelCase : int = layer_module(snake_case__ ) _lowerCAmelCase : int = self.shortcut(snake_case__ ) hidden_state += residual _lowerCAmelCase : Tuple = self.activation(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : List[str] = in_channels != out_channels or stride != 1 _lowerCAmelCase : Union[str, Any] = max(1 , out_channels // config.groups_width ) _lowerCAmelCase : Optional[Any] = ( TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) _lowerCAmelCase : Tuple = [ TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='layer.1' ), TFRegNetSELayer(snake_case__ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ), TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='layer.3' ), ] _lowerCAmelCase : Tuple = ACTaFN[config.hidden_act] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = hidden_state for layer_module in self.layers: _lowerCAmelCase : List[Any] = layer_module(snake_case__ ) _lowerCAmelCase : Tuple = self.shortcut(snake_case__ ) hidden_state += residual _lowerCAmelCase : str = self.activation(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 2 , snake_case__ = 2 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Dict = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer _lowerCAmelCase : Optional[int] = [ # downsampling is done in the first layer with stride of 2 layer(snake_case__ , snake_case__ , snake_case__ , stride=snake_case__ , name='layers.0' ), *[layer(snake_case__ , snake_case__ , snake_case__ , name=F'layers.{i+1}' ) for i in range(depth - 1 )], ] def a ( self , snake_case__ ): '''simple docstring''' for layer_module in self.layers: _lowerCAmelCase : int = layer_module(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : str = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( snake_case__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) ) _lowerCAmelCase : Union[str, Any] = zip(config.hidden_sizes , 
config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(snake_case__ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(snake_case__ , snake_case__ , snake_case__ , depth=snake_case__ , name=F'stages.{i+1}' ) ) def a ( self , snake_case__ , snake_case__ = False , snake_case__ = True ): '''simple docstring''' _lowerCAmelCase : List[Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _lowerCAmelCase : str = hidden_states + (hidden_state,) _lowerCAmelCase : List[str] = stage_module(snake_case__ ) if output_hidden_states: _lowerCAmelCase : Dict = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ ) @keras_serializable class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" __magic_name__ = RegNetConfig def __init__( self , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = config _lowerCAmelCase : Union[str, Any] = TFRegNetEmbeddings(snake_case__ , name='embedder' ) _lowerCAmelCase : Optional[int] = TFRegNetEncoder(snake_case__ , name='encoder' ) _lowerCAmelCase : Dict = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='pooler' ) @unpack_inputs def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = False , ): '''simple docstring''' _lowerCAmelCase : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase : int = self.embedder(snake_case__ , training=snake_case__ ) _lowerCAmelCase : List[str] = self.encoder( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ ) _lowerCAmelCase : List[Any] = encoder_outputs[0] _lowerCAmelCase : Tuple = self.pooler(snake_case__ ) # Change to NCHW output format have uniformity in the modules _lowerCAmelCase : Optional[int] = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) _lowerCAmelCase : Optional[Any] = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: _lowerCAmelCase : Union[str, Any] = tuple([tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case__ , pooler_output=snake_case__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = RegNetConfig __magic_name__ = "regnet" __magic_name__ = "pixel_values" @property def a ( self ): '''simple docstring''' return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} lowerCAmelCase : List[Any] = r""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ lowerCAmelCase : Dict = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE_ , ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , *snake_case__ , **snake_case__ ) _lowerCAmelCase : List[str] = TFRegNetMainLayer(snake_case__ , name='regnet' ) @unpack_inputs @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__=False , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase : str = self.regnet( pixel_values=snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , SCREAMING_SNAKE_CASE_ , ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , *snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[Any] = config.num_labels _lowerCAmelCase : Optional[Any] = TFRegNetMainLayer(snake_case__ , name='regnet' ) # classification head _lowerCAmelCase : Optional[int] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a ( self , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__=False , ): '''simple docstring''' _lowerCAmelCase : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase : Dict = self.regnet( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ ) _lowerCAmelCase : Optional[Any] = outputs.pooler_output if return_dict else outputs[1] _lowerCAmelCase : List[Any] = self.classifier[0](snake_case__ ) _lowerCAmelCase : Tuple = self.classifier[1](snake_case__ ) _lowerCAmelCase : int = None if labels is None else self.hf_compute_loss(labels=snake_case__ , logits=snake_case__ ) if not return_dict: _lowerCAmelCase : str = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
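# Aside on the NCHW <-> NHWC transposes used throughout the RegNet model above:
# tf.keras convolutions on CPU only support channels-last, so pixel values are
# moved to NHWC before the embedder and moved back to NCHW on output. A minimal
# standalone round-trip check (assumes tensorflow is installed):
import tensorflow as tf

x_nchw = tf.random.normal((1, 3, 224, 224))
x_nhwc = tf.transpose(x_nchw, perm=(0, 2, 3, 1))  # NCHW -> NHWC
x_back = tf.transpose(x_nhwc, perm=(0, 3, 1, 2))  # NHWC -> NCHW
assert x_back.shape == x_nchw.shape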
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# The closing _LazyModule call references `_import_structure`, so the dict and
# the sentencepiece-only entry below must be stored under that name.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''

from typing import Any


def mode(input_list: list) -> list[Any]:
    """Return every value that occurs most often in ``input_list``, sorted."""
    if not input_list:
        return []
    # count(value), not count(input_list): each entry holds that value's frequency
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # the maximum count in the input list
    # Gets the values of the modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
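# Added examples of the (assumed) contract: all tied values come back sorted.
assert mode([2, 2, 3, 3, 1]) == [2, 3]
assert mode([]) == []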
'''simple docstring''' # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""") class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ = True , snake_case__ = False ): '''simple docstring''' _lowerCAmelCase : str = scheduler _lowerCAmelCase : Optional[int] = optimizers if isinstance(snake_case__ , (list, tuple) ) else [optimizers] _lowerCAmelCase : Optional[int] = split_batches _lowerCAmelCase : Any = step_with_optimizer _lowerCAmelCase : Tuple = GradientState() def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*snake_case__ , **snake_case__ ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*snake_case__ , **snake_case__ ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step _lowerCAmelCase : Dict = AcceleratorState().num_processes for _ in range(snake_case__ ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , 'total_steps' ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*snake_case__ , **snake_case__ ) else: self.scheduler.step(*snake_case__ , **snake_case__ ) def a ( self ): '''simple docstring''' return self.scheduler.get_last_lr() def a ( self ): '''simple docstring''' return self.scheduler.state_dict() def a ( self , snake_case__ ): '''simple docstring''' self.scheduler.load_state_dict(snake_case__ ) def a ( self ): '''simple docstring''' return self.scheduler.get_lr() def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.scheduler.print_lr(*snake_case__ , **snake_case__ )
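# Usage sketch for the gradient-accumulation-aware scheduler wrapper above.
# Method names in this dump are collapsed to `a`; the first `a` plays the role
# of `step`. torch objects are assumed here and are not part of the original file:
# import torch
# model = torch.nn.Linear(2, 2)
# optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
# wrapped = UpperCamelCase__(scheduler, optimizer)  # the wrapper class defined above
# wrapped.a()  # advances the inner scheduler only after a real optimizer step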
'''simple docstring'''

from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
    MaskedBertForMultipleChoice,
    MaskedBertForQuestionAnswering,
    MaskedBertForSequenceClassification,
    MaskedBertForTokenClassification,
    MaskedBertModel,
)
from .modules import *
'''simple docstring''' from sklearn.metrics import recall_score import datasets lowerCAmelCase : Union[str, Any] = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ lowerCAmelCase : List[Any] = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ lowerCAmelCase : int = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def a ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('int32' ) ), 'references': datasets.Sequence(datasets.Value('int32' ) ), } if self.config_name == 'multilabel' else { 'predictions': datasets.Value('int32' ), 'references': datasets.Value('int32' ), } ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'] , ) def a ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=1 , snake_case__="binary" , snake_case__=None , snake_case__="warn" , ): '''simple docstring''' _lowerCAmelCase : List[str] = recall_score( snake_case__ , snake_case__ , labels=snake_case__ , pos_label=snake_case__ , average=snake_case__ , sample_weight=snake_case__ , zero_division=snake_case__ , ) return {"recall": float(snake_case__ ) if score.size == 1 else score}
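# Usage mirrors the docstring examples embedded in the metric above (requires
# scikit-learn and a datasets version that still ships `load_metric`):
# import datasets
# recall_metric = datasets.load_metric("recall")
# results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
# print(results)  # {'recall': 0.6666666666666666}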
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """spiece.model"""} lowerCAmelCase : Optional[int] = { """vocab_file""": { """AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""", } } lowerCAmelCase : Union[str, Any] = { """AI-Sweden/gpt-sw3-126m""": 20_48, """AI-Sweden/gpt-sw3-350m""": 20_48, """AI-Sweden/gpt-sw3-1.6b""": 20_48, """AI-Sweden/gpt-sw3-6.7b""": 20_48, """AI-Sweden/gpt-sw3-20b""": 20_48, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] def __init__( self , snake_case__ , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = None , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs _lowerCAmelCase : List[Any] = kwargs.get('name_or_path' ) if name_or_path is None: logger.warning( 'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,' ' you are testing the model, this can safely be ignored' ) _lowerCAmelCase : Any = 'None' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing _lowerCAmelCase : str = '<|endoftext|>' if eos_token is None else eos_token _lowerCAmelCase : Tuple = '<unk>' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: _lowerCAmelCase : List[str] = unk_token if pad_token is None else pad_token _lowerCAmelCase : Optional[int] = eos_token if bos_token is None else bos_token else: _lowerCAmelCase : Tuple = '<pad>' if pad_token is None else pad_token _lowerCAmelCase : Union[str, Any] = '<s>' if bos_token is None else bos_token super().__init__( do_lower_case=snake_case__ , remove_space=snake_case__ , keep_accents=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , ) _lowerCAmelCase : Union[str, Any] = do_lower_case _lowerCAmelCase : Optional[int] = remove_space _lowerCAmelCase : Any = keep_accents _lowerCAmelCase : Optional[int] = vocab_file _lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(snake_case__ ) # Used for whitespace normalization in input texts # fmt : off _lowerCAmelCase : Optional[Any] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', '„'} # fmt : on # Regular expression to remove 
non-printing characters (e.g. some unicode control chars) in preprocessing _lowerCAmelCase : Optional[Any] = re.compile( F'[{"".join(map(snake_case__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' ) def __getstate__( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.__dict__.copy() _lowerCAmelCase : int = None return state def __setstate__( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): _lowerCAmelCase : int = {} _lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def a ( self ): '''simple docstring''' return len(self.sp_model ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.non_printing_characters_re.sub('' , snake_case__ ) # Normalize whitespaces _lowerCAmelCase : Tuple = ''.join([char if char not in self.whitespaces else ' ' for char in text] ) # NFC Unicode normalization _lowerCAmelCase : Union[str, Any] = unicodedata.normalize('NFC' , snake_case__ ) return text def a ( self , snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = self.preprocess_text(snake_case__ ) return self.sp_model.encode(snake_case__ , out_type=snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' return self.sp_model.PieceToId(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' return self.sp_model.IdToPiece(snake_case__ ) @staticmethod def a ( snake_case__ ): '''simple docstring''' return out_string def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = [] _lowerCAmelCase : Optional[Any] = '' _lowerCAmelCase : Tuple = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(snake_case__ ) + token _lowerCAmelCase : Union[str, Any] = True _lowerCAmelCase : List[Any] = [] else: current_sub_tokens.append(snake_case__ ) _lowerCAmelCase : List[Any] = False out_string += self.sp_model.decode(snake_case__ ) return out_string def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return _lowerCAmelCase : int = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case__ ) elif not os.path.isfile(self.vocab_file ): with open(snake_case__ , 'wb' ) as fi: _lowerCAmelCase : Any = self.sp_model.serialized_model_proto() fi.write(snake_case__ ) return (out_vocab_file,) def a ( self , snake_case__ , snake_case__ = False ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): _lowerCAmelCase : Optional[Any] = self.preprocess_text(snake_case__ ) 
_lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ ) else: _lowerCAmelCase : Tuple = [self.preprocess_text(snake_case__ ) for t in text] _lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ ) if return_tensors is True or return_tensors == "pt": _lowerCAmelCase : int = torch.tensor(snake_case__ ) return token_ids def a ( self , snake_case__ ): '''simple docstring''' return self.sp_model.decode(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()] _lowerCAmelCase : str = ( F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(snake_case__ ) + F'{self.bos_token}Bot:' ) return self.encode(text=snake_case__ )
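# Prompt shape produced by the conversation helper at the end of the tokenizer
# above: eos + bos + bos.join(turns) + bos + "Bot:". With the defaults
# eos="<|endoftext|>" and bos="<s>", a two-turn chat encodes the string:
# "<|endoftext|><s>User: hi<s>Bot: hello<s>Bot:"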
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=30 , snake_case__=2 , snake_case__=3 , snake_case__=True , snake_case__=True , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=10 , snake_case__=0.02 , snake_case__=3 , snake_case__=None , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = parent _lowerCAmelCase : str = batch_size _lowerCAmelCase : Optional[int] = image_size _lowerCAmelCase : Optional[int] = patch_size _lowerCAmelCase : Optional[int] = num_channels _lowerCAmelCase : int = is_training _lowerCAmelCase : Tuple = use_labels _lowerCAmelCase : Optional[Any] = hidden_size _lowerCAmelCase : List[str] = num_hidden_layers _lowerCAmelCase : Any = num_attention_heads _lowerCAmelCase : Optional[Any] = intermediate_size _lowerCAmelCase : Tuple = hidden_act _lowerCAmelCase : List[str] = hidden_dropout_prob _lowerCAmelCase : Tuple = attention_probs_dropout_prob _lowerCAmelCase : Union[str, Any] = type_sequence_label_size _lowerCAmelCase : Dict = initializer_range _lowerCAmelCase : Union[str, Any] = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _lowerCAmelCase : Tuple = (image_size // patch_size) ** 2 _lowerCAmelCase : Any = num_patches + 1 def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase : int = None if self.use_labels: _lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase : int = self.get_config() return config, pixel_values, labels def a ( self ): '''simple docstring''' return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , ) def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[str] = TFViTModel(config=snake_case__ ) _lowerCAmelCase : List[Any] = model(snake_case__ , training=snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. 
_lowerCAmelCase : Tuple = self.image_size // 2 _lowerCAmelCase : str = pixel_values[:, :, :image_size, :image_size] _lowerCAmelCase : Optional[int] = model(snake_case__ , interpolate_pos_encoding=snake_case__ , training=snake_case__ ) _lowerCAmelCase : Tuple = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = self.type_sequence_label_size _lowerCAmelCase : Optional[Any] = TFViTForImageClassification(snake_case__ ) _lowerCAmelCase : Dict = model(snake_case__ , labels=snake_case__ , training=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. _lowerCAmelCase : List[Any] = self.image_size // 2 _lowerCAmelCase : str = pixel_values[:, :, :image_size, :image_size] _lowerCAmelCase : Union[str, Any] = model(snake_case__ , interpolate_pos_encoding=snake_case__ , training=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _lowerCAmelCase : int = 1 _lowerCAmelCase : Any = TFViTForImageClassification(snake_case__ ) _lowerCAmelCase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase : Union[str, Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = config_and_inputs _lowerCAmelCase : int = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () __magic_name__ = ( {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification} if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = TFViTModelTester(self ) _lowerCAmelCase : Any = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 ) def a ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def a ( self ): '''simple docstring''' pass @unittest.skip(reason='ViT does not use inputs_embeds' ) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : List[str] = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) _lowerCAmelCase : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Layer ) ) def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : Dict = model_class(snake_case__ ) _lowerCAmelCase : str = inspect.signature(model.call ) # signature.parameters is an 
OrderedDict => so arg_names order is deterministic _lowerCAmelCase : Any = [*signature.parameters.keys()] _lowerCAmelCase : Tuple = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = TFViTModel.from_pretrained('google/vit-base-patch16-224' ) self.assertIsNotNone(snake_case__ ) def lowercase (): """simple docstring""" _lowerCAmelCase : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def a ( self ): '''simple docstring''' return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ) _lowerCAmelCase : int = self.default_image_processor _lowerCAmelCase : Dict = prepare_img() _lowerCAmelCase : Optional[int] = image_processor(images=snake_case__ , return_tensors='tf' ) # forward pass _lowerCAmelCase : Any = model(**snake_case__ ) # verify the logits _lowerCAmelCase : Tuple = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) _lowerCAmelCase : Union[str, Any] = tf.constant([-0.2744, 0.8215, -0.0836] ) tf.debugging.assert_near(outputs.logits[0, :3] , snake_case__ , atol=1E-4 )
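# Sequence-length arithmetic the tester above relies on: ViT produces one patch
# token per (patch_size x patch_size) tile plus the [CLS] token. For the tester
# defaults (image_size=30, patch_size=2):
assert (30 // 2) ** 2 + 1 == 226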
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = (DDPMScheduler,) def a ( self , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**snake_case__ ) return config def a ( self ): '''simple docstring''' for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=snake_case__ ) def a ( self ): '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ ) def a ( self ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=snake_case__ ) def a ( self ): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=snake_case__ ) def a ( self ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=snake_case__ ) def a ( self ): '''simple docstring''' self.check_over_configs(thresholding=snake_case__ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , ) def a ( self ): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=snake_case__ ) def a ( self ): '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.scheduler_classes[0] _lowerCAmelCase : Optional[Any] = self.get_scheduler_config() _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.scheduler_classes[0] _lowerCAmelCase : Optional[Any] = self.get_scheduler_config() _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = len(snake_case__ ) _lowerCAmelCase : str = self.dummy_model() _lowerCAmelCase : str = self.dummy_sample_deter _lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 ) for t in reversed(range(snake_case__ ) ): # 1. predict noise residual _lowerCAmelCase : List[Any] = model(snake_case__ , snake_case__ ) # 2. 
predict previous mean of sample x_t-1 _lowerCAmelCase : Any = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance _lowerCAmelCase : Dict = pred_prev_sample _lowerCAmelCase : Dict = torch.sum(torch.abs(snake_case__ ) ) _lowerCAmelCase : List[str] = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.scheduler_classes[0] _lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='v_prediction' ) _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = len(snake_case__ ) _lowerCAmelCase : Any = self.dummy_model() _lowerCAmelCase : Tuple = self.dummy_sample_deter _lowerCAmelCase : Optional[int] = torch.manual_seed(0 ) for t in reversed(range(snake_case__ ) ): # 1. predict noise residual _lowerCAmelCase : Union[str, Any] = model(snake_case__ , snake_case__ ) # 2. predict previous mean of sample x_t-1 _lowerCAmelCase : Dict = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance _lowerCAmelCase : Tuple = pred_prev_sample _lowerCAmelCase : Any = torch.sum(torch.abs(snake_case__ ) ) _lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.scheduler_classes[0] _lowerCAmelCase : Optional[int] = self.get_scheduler_config() _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=snake_case__ ) _lowerCAmelCase : Union[str, Any] = scheduler.timesteps for i, timestep in enumerate(snake_case__ ): if i == len(snake_case__ ) - 1: _lowerCAmelCase : str = -1 else: _lowerCAmelCase : Optional[Any] = timesteps[i + 1] _lowerCAmelCase : int = scheduler.previous_timestep(snake_case__ ) _lowerCAmelCase : int = prev_t.item() self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.scheduler_classes[0] _lowerCAmelCase : Tuple = self.get_scheduler_config() _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = [100, 87, 50, 51, 0] with self.assertRaises(snake_case__ , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.scheduler_classes[0] _lowerCAmelCase : List[str] = self.get_scheduler_config() _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = [100, 87, 50, 1, 0] _lowerCAmelCase : int = len(snake_case__ ) with self.assertRaises(snake_case__ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' 
): scheduler.set_timesteps(num_inference_steps=snake_case__ , timesteps=snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.scheduler_classes[0] _lowerCAmelCase : int = self.get_scheduler_config() _lowerCAmelCase : Any = scheduler_class(**snake_case__ ) _lowerCAmelCase : Any = [scheduler.config.num_train_timesteps] with self.assertRaises( snake_case__ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=snake_case__ )
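# Sketch of where the "fixed_small" variances asserted above come from, assuming
# the default linear beta schedule (a numpy stand-in for the scheduler internals,
# not diffusers' own code):
import numpy as np

betas = np.linspace(1e-4, 0.02, 1000)
alphas_cumprod = np.cumprod(1.0 - betas)

def fixed_small_variance(t: int) -> float:
    alpha_bar_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
    return betas[t] * (1.0 - alpha_bar_prev) / (1.0 - alphas_cumprod[t])

assert abs(fixed_small_variance(0) - 0.0) < 1e-5
assert abs(fixed_small_variance(999) - 0.02) < 1e-4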
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings lowerCAmelCase : List[str] = r""" [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: title_sep (`str`, *optional*, defaults to `\" / \"`): Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`]. doc_sep (`str`, *optional*, defaults to `\" // \"`): Separator inserted between the text of the retrieved document and the original input when calling [`RagRetriever`]. n_docs (`int`, *optional*, defaults to 5): Number of documents to retrieve. max_combined_length (`int`, *optional*, defaults to 300): Max length of contextualized input returned by [`~RagRetriever.__call__`]. retrieval_vector_size (`int`, *optional*, defaults to 768): Dimensionality of the document embeddings indexed by [`RagRetriever`]. retrieval_batch_size (`int`, *optional*, defaults to 8): Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated [`RagRetriever`]. dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`): A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using `datasets.list_datasets()`). dataset_split (`str`, *optional*, defaults to `\"train\"`) Which split of the `dataset` to load. index_name (`str`, *optional*, defaults to `\"compressed\"`) The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and `\"compressed\"`. index_path (`str`, *optional*) The path to the serialized faiss index on disk. passages_path (`str`, *optional*): A path to text passages compatible with the faiss index. Required if using [`~models.rag.retrieval_rag.LegacyIndex`] use_dummy_dataset (`bool`, *optional*, defaults to `False`) Whether to load a \"dummy\" variant of the dataset specified by `dataset`. label_smoothing (`float`, *optional*, defaults to 0.0): Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. do_marginalize (`bool`, *optional*, defaults to `False`): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*, defaults to `False`): Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation. do_deduplication (`bool`, *optional*, defaults to `True`): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. exclude_bos_score (`bool`, *optional*, defaults to `False`): Whether or not to disregard the BOS token when computing the loss. output_retrieved(`bool`, *optional*, defaults to `False`): If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask` are returned. See returned tensors for more detail. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
""" @add_start_docstrings(SCREAMING_SNAKE_CASE_ ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "rag" __magic_name__ = True def __init__( self , snake_case__=None , snake_case__=True , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=" / " , snake_case__=" // " , snake_case__=5 , snake_case__=300 , snake_case__=768 , snake_case__=8 , snake_case__="wiki_dpr" , snake_case__="train" , snake_case__="compressed" , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=False , snake_case__=0.0 , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=True , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__( bos_token_id=snake_case__ , pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , prefix=snake_case__ , vocab_size=snake_case__ , **snake_case__ , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" _lowerCAmelCase : Dict = kwargs.pop('question_encoder' ) _lowerCAmelCase : int = question_encoder_config.pop('model_type' ) _lowerCAmelCase : str = kwargs.pop('generator' ) _lowerCAmelCase : Tuple = decoder_config.pop('model_type' ) from ..auto.configuration_auto import AutoConfig _lowerCAmelCase : Any = AutoConfig.for_model(snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[Any] = AutoConfig.for_model(snake_case__ , **snake_case__ ) _lowerCAmelCase : Tuple = reduce_loss _lowerCAmelCase : Optional[int] = label_smoothing _lowerCAmelCase : str = exclude_bos_score _lowerCAmelCase : int = do_marginalize _lowerCAmelCase : Union[str, Any] = title_sep _lowerCAmelCase : Union[str, Any] = doc_sep _lowerCAmelCase : Dict = n_docs _lowerCAmelCase : List[str] = max_combined_length _lowerCAmelCase : Dict = dataset _lowerCAmelCase : Dict = dataset_split _lowerCAmelCase : Tuple = index_name _lowerCAmelCase : List[Any] = retrieval_vector_size _lowerCAmelCase : Dict = retrieval_batch_size _lowerCAmelCase : Dict = passages_path _lowerCAmelCase : str = index_path _lowerCAmelCase : Optional[int] = use_dummy_dataset _lowerCAmelCase : Dict = output_retrieved _lowerCAmelCase : str = do_deduplication _lowerCAmelCase : Tuple = use_cache if self.forced_eos_token_id is None: _lowerCAmelCase : str = getattr(self.generator , 'forced_eos_token_id' , snake_case__ ) @classmethod def a ( cls , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ ) _lowerCAmelCase : int = self.question_encoder.to_dict() _lowerCAmelCase : List[str] = self.generator.to_dict() _lowerCAmelCase : str = self.__class__.model_type return output
630
'''simple docstring''' import socket def lowercase (): """simple docstring""" _lowerCAmelCase : Tuple = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) _lowerCAmelCase : Optional[int] = socket.gethostname() _lowerCAmelCase : Any = 1_2_3_1_2 sock.connect((host, port) ) sock.send(B'Hello server!' ) with open('Received_file' , 'wb' ) as out_file: print('File opened' ) print('Receiving data...' ) while True: _lowerCAmelCase : Union[str, Any] = sock.recv(1_0_2_4 ) if not data: break out_file.write(_A ) print('Successfully received the file' ) sock.close() print('Connection closed' ) if __name__ == "__main__": main()
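A hedged companion sketch for the client above: a minimal one-shot file server bound to the same host and port (12312). The filename `tosend.bin` is a placeholder; closing the connection is what ends the client's `recv` loop.

import socket

def serve_file(path: str = "tosend.bin", port: int = 12312) -> None:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
        server.bind((socket.gethostname(), port))
        server.listen(1)
        conn, addr = server.accept()
        with conn:
            print("Connected by", addr)
            print(conn.recv(1024))  # the client's greeting
            with open(path, "rb") as src:
                while chunk := src.read(1024):
                    conn.sendall(chunk)  # EOF on close terminates the client loop

if __name__ == "__main__":
    serve_file()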
630
1
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__=None , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = parent _lowerCAmelCase : Any = batch_size _lowerCAmelCase : Any = seq_length _lowerCAmelCase : Union[str, Any] = is_training _lowerCAmelCase : Tuple = use_token_type_ids _lowerCAmelCase : int = use_labels _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Any = hidden_size _lowerCAmelCase : int = num_hidden_layers _lowerCAmelCase : List[str] = num_attention_heads _lowerCAmelCase : Optional[Any] = intermediate_size _lowerCAmelCase : Optional[int] = hidden_act _lowerCAmelCase : str = hidden_dropout_prob _lowerCAmelCase : int = attention_probs_dropout_prob _lowerCAmelCase : Union[str, Any] = max_position_embeddings _lowerCAmelCase : int = type_vocab_size _lowerCAmelCase : Union[str, Any] = type_sequence_label_size _lowerCAmelCase : Optional[int] = initializer_range _lowerCAmelCase : Tuple = num_labels _lowerCAmelCase : List[str] = num_choices _lowerCAmelCase : List[str] = scope _lowerCAmelCase : Optional[Any] = self.vocab_size - 1 def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase : List[str] = None if self.use_token_type_ids: _lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowerCAmelCase : int = None _lowerCAmelCase : List[Any] = None _lowerCAmelCase : Optional[int] = None if self.use_labels: _lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase : Tuple = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) _lowerCAmelCase : Any = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = OpenAIGPTModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : str = model(snake_case__ , token_type_ids=snake_case__ , head_mask=snake_case__ ) _lowerCAmelCase : int = 
model(snake_case__ , token_type_ids=snake_case__ ) _lowerCAmelCase : Optional[int] = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = OpenAIGPTLMHeadModel(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Any = model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = OpenAIGPTDoubleHeadsModel(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : List[str] = model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[str] = self.num_labels _lowerCAmelCase : str = OpenAIGPTForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase : Union[str, Any] = model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : int = config_and_inputs _lowerCAmelCase : Any = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) __magic_name__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly __magic_name__ = ( { "feature-extraction": OpenAIGPTModel, "text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def a ( self , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' _lowerCAmelCase : List[Any] = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": _lowerCAmelCase : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ , ) _lowerCAmelCase : Tuple = inputs_dict['labels'] _lowerCAmelCase : Dict = inputs_dict['labels'] _lowerCAmelCase : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=snake_case__ , ) _lowerCAmelCase : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def a ( self ): '''simple docstring''' _lowerCAmelCase : str = OpenAIGPTModelTester(self ) _lowerCAmelCase : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , n_embd=37 ) def a ( self ): '''simple docstring''' self.config_tester.run_common_tests() def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*snake_case__ ) @slow def a ( self ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : Optional[Any] = OpenAIGPTModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(snake_case__ ) _lowerCAmelCase : int = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=snake_case__ ) # the president is _lowerCAmelCase : str = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 4_0477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the _lowerCAmelCase : Dict = model.generate(snake_case__ , do_sample=snake_case__ ) self.assertListEqual(output_ids[0].tolist() , snake_case__ )
630
'''simple docstring''' import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel lowerCAmelCase : Tuple = False lowerCAmelCase : str = True lowerCAmelCase : List[Any] = False if __name__ == "__main__": lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( """--repo_path""", default=None, type=str, required=True, help="""The config json file corresponding to the architecture.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") lowerCAmelCase : Optional[int] = parser.parse_args() lowerCAmelCase : int = { """image_size""": """sample_size""", """num_res_blocks""": """layers_per_block""", """block_channels""": """block_out_channels""", """down_blocks""": """down_block_types""", """up_blocks""": """up_block_types""", """downscale_freq_shift""": """freq_shift""", """resnet_num_groups""": """norm_num_groups""", """resnet_act_fn""": """act_fn""", """resnet_eps""": """norm_eps""", """num_head_channels""": """attention_head_dim""", } lowerCAmelCase : int = { """time_steps""": """time_proj""", """mid""": """mid_block""", """downsample_blocks""": """down_blocks""", """upsample_blocks""": """up_blocks""", } lowerCAmelCase : Optional[Any] = """""" if has_file(args.repo_path, """config.json""") else """unet""" with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader: lowerCAmelCase : int = reader.read() lowerCAmelCase : List[str] = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, """config.json"""): lowerCAmelCase : str = UNetaDModel(**config) else: lowerCAmelCase : Union[str, Any] = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel lowerCAmelCase : Dict = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) lowerCAmelCase : Union[str, Any] = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: lowerCAmelCase : str = config[key] del config[key] lowerCAmelCase : Optional[int] = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]] lowerCAmelCase : Dict = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]] if do_only_weights: lowerCAmelCase : Tuple = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin""")) lowerCAmelCase : str = {} for param_key, param_value in state_dict.items(): if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""): continue lowerCAmelCase : str = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split(""".""")[0] == key: lowerCAmelCase : Dict = param_value lowerCAmelCase : Tuple = True if not has_changed: lowerCAmelCase : Tuple = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
630
1
'''simple docstring''' from __future__ import annotations from collections.abc import Iterator from typing import Any class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = data _lowerCAmelCase : Node | None = None class UpperCamelCase__ : """simple docstring""" def __init__( self ): '''simple docstring''' _lowerCAmelCase : int = None _lowerCAmelCase : Union[str, Any] = None def __iter__( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.head while self.head: yield node.data _lowerCAmelCase : Tuple = node.next if node == self.head: break def __len__( self ): '''simple docstring''' return sum(1 for _ in self ) def __repr__( self ): '''simple docstring''' return "->".join(str(snake_case__ ) for item in iter(self ) ) def a ( self , snake_case__ ): '''simple docstring''' self.insert_nth(len(self ) , snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' self.insert_nth(0 , snake_case__ ) def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' if index < 0 or index > len(self ): raise IndexError('list index out of range.' ) _lowerCAmelCase : Union[str, Any] = Node(snake_case__ ) if self.head is None: _lowerCAmelCase : Tuple = new_node # first node points itself _lowerCAmelCase : Any = new_node elif index == 0: # insert at head _lowerCAmelCase : List[str] = self.head _lowerCAmelCase : Any = new_node else: _lowerCAmelCase : Optional[int] = self.head for _ in range(index - 1 ): _lowerCAmelCase : Union[str, Any] = temp.next _lowerCAmelCase : Union[str, Any] = temp.next _lowerCAmelCase : Any = new_node if index == len(self ) - 1: # insert at tail _lowerCAmelCase : Tuple = new_node def a ( self ): '''simple docstring''' return self.delete_nth(0 ) def a ( self ): '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def a ( self , snake_case__ = 0 ): '''simple docstring''' if not 0 <= index < len(self ): raise IndexError('list index out of range.' 
) _lowerCAmelCase : Union[str, Any] = self.head if self.head == self.tail: # just one node _lowerCAmelCase : str = None elif index == 0: # delete head node _lowerCAmelCase : int = self.tail.next.next _lowerCAmelCase : Optional[Any] = self.head.next else: _lowerCAmelCase : List[Any] = self.head for _ in range(index - 1 ): _lowerCAmelCase : Any = temp.next _lowerCAmelCase : int = temp.next _lowerCAmelCase : int = temp.next.next if index == len(self ) - 1: # delete at tail _lowerCAmelCase : str = temp return delete_node.data def a ( self ): '''simple docstring''' return len(self ) == 0 def lowercase (): """simple docstring""" _lowerCAmelCase : str = CircularLinkedList() assert len(_A ) == 0 assert circular_linked_list.is_empty() is True assert str(_A ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(_A ) == i circular_linked_list.insert_nth(_A , i + 1 ) assert str(_A ) == "->".join(str(_A ) for i in range(1 , 6 ) ) circular_linked_list.insert_tail(6 ) assert str(_A ) == "->".join(str(_A ) for i in range(1 , 7 ) ) circular_linked_list.insert_head(0 ) assert str(_A ) == "->".join(str(_A ) for i in range(0 , 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(_A ) == "->".join(str(_A ) for i in range(1 , 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 , 3 ) assert str(_A ) == "->".join(str(_A ) for i in range(1 , 6 ) ) assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
630
'''simple docstring''' import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None ): '''simple docstring''' super().__init__() _lowerCAmelCase : Union[str, Any] = pad_token_id _lowerCAmelCase : List[Any] = max_length _lowerCAmelCase : Tuple = vocab _lowerCAmelCase : str = merges _lowerCAmelCase : List[str] = BytePairTokenizer(snake_case__ , snake_case__ , sequence_length=snake_case__ ) @classmethod def a ( cls , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = [' '.join(snake_case__ ) for m in tokenizer.bpe_ranks.keys()] _lowerCAmelCase : Any = tokenizer.get_vocab() return cls(snake_case__ , snake_case__ , *snake_case__ , **snake_case__ ) @classmethod def a ( cls , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = GPTaTokenizer.from_pretrained(snake_case__ , *snake_case__ , **snake_case__ ) return cls.from_tokenizer(snake_case__ , *snake_case__ , **snake_case__ ) @classmethod def a ( cls , snake_case__ ): '''simple docstring''' return cls(**snake_case__ ) def a ( self ): '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = self.tf_tokenizer(snake_case__ ) _lowerCAmelCase : str = tf.ones_like(snake_case__ ) if self.pad_token_id is not None: # pad the tokens up to max length _lowerCAmelCase : Optional[int] = max_length if max_length is not None else self.max_length if max_length is not None: _lowerCAmelCase , _lowerCAmelCase : str = pad_model_inputs( snake_case__ , max_seq_length=snake_case__ , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
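A hedged usage sketch for the Keras layer above (upstream name `TFGPT2Tokenizer`; its classmethods appear as `a` here because of the style transform). It assumes `tensorflow`, `keras_nlp`, and `tensorflow_text` are installed; the layer's call returns a dict of `input_ids` and `attention_mask`, padded only when a `pad_token_id` is configured.

import tensorflow as tf
from transformers import TFGPT2Tokenizer

tok_layer = TFGPT2Tokenizer.from_pretrained("gpt2")
encoded = tok_layer(tf.constant(["hello world", "a longer second sentence"]))
print(sorted(encoded.keys()))  # ['attention_mask', 'input_ids']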
630
1
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : Optional[int] = { """configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""], """feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""], """processing_mctct""": ["""MCTCTProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[str] = [ """MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""", """MCTCTForCTC""", """MCTCTModel""", """MCTCTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys lowerCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
630
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : Optional[int] = { """configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""], """feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""], """processing_mctct""": ["""MCTCTProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[str] = [ """MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""", """MCTCTForCTC""", """MCTCTModel""", """MCTCTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys lowerCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
630
1
'''simple docstring''' import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = VideoToVideoSDPipeline __magic_name__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"} __magic_name__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"} __magic_name__ = PipelineTesterMixin.required_optional_params - {"latents"} __magic_name__ = False # No `output_type`. __magic_name__ = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : int = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , ) _lowerCAmelCase : Dict = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , ) torch.manual_seed(0 ) _lowerCAmelCase : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) _lowerCAmelCase : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , ) _lowerCAmelCase : str = CLIPTextModel(snake_case__ ) _lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _lowerCAmelCase : List[str] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def a ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' _lowerCAmelCase : Tuple = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) if str(snake_case__ ).startswith('mps' ): _lowerCAmelCase : Union[str, Any] = torch.manual_seed(snake_case__ ) else: _lowerCAmelCase : Dict = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) _lowerCAmelCase : int = { 'prompt': 'A painting of a squirrel eating a burger', 'video': video, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase : str = self.get_dummy_components() _lowerCAmelCase : Dict = 
VideoToVideoSDPipeline(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Any = self.get_dummy_inputs(snake_case__ ) _lowerCAmelCase : Dict = 'np' _lowerCAmelCase : List[str] = sd_pipe(**snake_case__ ).frames _lowerCAmelCase : Dict = frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) _lowerCAmelCase : Tuple = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def a ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ , expected_max_diff=5E-3 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def a ( self ): '''simple docstring''' pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def a ( self ): '''simple docstring''' pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames _lowerCAmelCase : str = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase : str = torch.randn((1, 10, 3, 1024, 576) , generator=snake_case__ ) _lowerCAmelCase : List[Any] = video.to('cuda' ) _lowerCAmelCase : Optional[int] = 'Spiderman is surfing' _lowerCAmelCase : List[str] = pipe(snake_case__ , video=snake_case__ , generator=snake_case__ , num_inference_steps=3 , output_type='pt' ).frames _lowerCAmelCase : Any = np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
630
'''simple docstring''' lowerCAmelCase : Optional[int] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)] def lowercase (_A ): """simple docstring""" _lowerCAmelCase : str = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0] number //= 1_0_0_0_0_0 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution lowerCAmelCase : list[bool | None] = [None] * 10_00_00_00 lowerCAmelCase : List[str] = True lowerCAmelCase : Union[str, Any] = False def lowercase (_A ): """simple docstring""" if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore _lowerCAmelCase : Any = chain(next_number(_A ) ) _lowerCAmelCase : List[str] = number_chain while number < 1_0_0_0_0_0_0_0: _lowerCAmelCase : Tuple = number_chain number *= 1_0 return number_chain def lowercase (_A = 1_0_0_0_0_0_0_0 ): """simple docstring""" for i in range(1 , _A ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(_A ) if __name__ == "__main__": import doctest doctest.testmod() print(F'''{solution() = }''')
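A small self-contained check, independent of the lookup-table version above, of the two-chain fact the solution relies on (Project Euler 92): iterating the sum of squared digits always ends in 1 or in the 89-cycle.

def next_number(n: int) -> int:
    return sum(int(d) ** 2 for d in str(n))

def chain_end(n: int) -> int:
    while n not in (1, 89):
        n = next_number(n)
    return n

assert chain_end(44) == 1   # 44 -> 32 -> 13 -> 10 -> 1
assert chain_end(85) == 89  # 85 -> 89
print(sum(1 for i in range(1, 10_000) if chain_end(i) == 89))  # small-scale count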
630
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: lowerCAmelCase : Optional[int] = None lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : str = """▁""" lowerCAmelCase : str = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : List[str] = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } lowerCAmelCase : Tuple = { """google/pegasus-xsum""": 5_12, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = PegasusTokenizer __magic_name__ = ["input_ids", "attention_mask"] def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<pad>" , snake_case__="</s>" , snake_case__="<unk>" , snake_case__="<mask_2>" , snake_case__="<mask_1>" , snake_case__=None , snake_case__=103 , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = offset if additional_special_tokens is not None: if not isinstance(snake_case__ , snake_case__ ): raise TypeError( F'additional_special_tokens should be of type {type(snake_case__ )}, but is' F' {type(snake_case__ )}' ) _lowerCAmelCase : Tuple = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ F'<unk_{i}>' for i in range(len(snake_case__ ) , self.offset - 1 ) ] if len(set(snake_case__ ) ) != len(snake_case__ ): raise ValueError( 'Please make sure that the provided additional_special_tokens do not contain an incorrectly' F' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' 
) _lowerCAmelCase : Any = additional_special_tokens_extended else: _lowerCAmelCase : int = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F'<unk_{i}>' for i in range(2 , self.offset )] super().__init__( snake_case__ , tokenizer_file=snake_case__ , pad_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , mask_token=snake_case__ , mask_token_sent=snake_case__ , offset=snake_case__ , additional_special_tokens=snake_case__ , **snake_case__ , ) _lowerCAmelCase : Union[str, Any] = vocab_file _lowerCAmelCase : str = False if not self.vocab_file else True def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( 'There should be 3 special tokens: mask_token, pad_token, and eos_token +' F' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' ) return [1 if x in all_special_ids else 0 for x in seq] def a ( self , snake_case__ , snake_case__ = None , snake_case__ = False ): '''simple docstring''' if already_has_special_tokens: return self._special_token_mask(snake_case__ ) elif token_ids_a is None: return self._special_token_mask(snake_case__ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def a ( self , snake_case__ , snake_case__=None ): '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return _lowerCAmelCase : Union[str, Any] = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) return (out_vocab_file,)
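A hedged check of the behavior the tokenizer above encodes: sequences receive a single trailing `</s>` and no BOS, and the special-token mask flags every id in `all_special_ids` except `<unk>`. Downloading the real `google/pegasus-xsum` checkpoint is assumed for illustration.

from transformers import PegasusTokenizerFast

tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
ids = tok("hello world")["input_ids"]
assert ids[-1] == tok.eos_token_id  # only </s> is appended
print(tok.get_special_tokens_mask(ids, already_has_special_tokens=True))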
630
'''simple docstring''' import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(snake_case__ , 'width_multiplier' ) ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=64 , snake_case__=2 , snake_case__=3 , snake_case__="swish" , snake_case__=3 , snake_case__=32 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=True , snake_case__=True , snake_case__=10 , snake_case__=None , snake_case__=0.25 , snake_case__=0.0 , snake_case__=0.0 , ): '''simple docstring''' _lowerCAmelCase : Tuple = parent _lowerCAmelCase : Optional[int] = batch_size _lowerCAmelCase : List[Any] = image_size _lowerCAmelCase : List[Any] = patch_size _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 ) _lowerCAmelCase : Optional[Any] = hidden_act _lowerCAmelCase : List[Any] = conv_kernel_size _lowerCAmelCase : Optional[Any] = output_stride _lowerCAmelCase : List[Any] = classifier_dropout_prob _lowerCAmelCase : str = use_labels _lowerCAmelCase : List[str] = is_training _lowerCAmelCase : Optional[int] = num_labels _lowerCAmelCase : List[str] = initializer_range _lowerCAmelCase : str = scope _lowerCAmelCase : Any = width_multiplier _lowerCAmelCase : Union[str, Any] = ffn_dropout _lowerCAmelCase : Optional[int] = attn_dropout def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase : Optional[Any] = None _lowerCAmelCase : Dict = None if self.use_labels: _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _lowerCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def a ( self ): '''simple docstring''' return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = MobileViTVaModel(config=snake_case__ ) model.to(snake_case__ ) 
model.eval() _lowerCAmelCase : str = model(snake_case__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.num_labels _lowerCAmelCase : List[Any] = MobileViTVaForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.num_labels _lowerCAmelCase : Optional[int] = MobileViTVaForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Dict = model(snake_case__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) _lowerCAmelCase : Any = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = config_and_inputs _lowerCAmelCase : Tuple = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) __magic_name__ = ( { "feature-extraction": MobileViTVaModel, "image-classification": MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def a ( self ): '''simple docstring''' _lowerCAmelCase : int = MobileViTVaModelTester(self ) _lowerCAmelCase : Dict = MobileViTVaConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ ) def a ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' ) def a ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' ) def a ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not output attentions' ) def a ( self ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' ) def a ( self ): '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : str = model_class(snake_case__ ) _lowerCAmelCase : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase : int = [*signature.parameters.keys()] _lowerCAmelCase : Any = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def a ( self ): '''simple docstring''' def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ): _lowerCAmelCase : Dict = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): _lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) _lowerCAmelCase : List[str] = outputs.hidden_states _lowerCAmelCase : List[str] = 5 self.assertEqual(len(snake_case__ ) , snake_case__ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. _lowerCAmelCase : List[Any] = 2 for i in range(len(snake_case__ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : Optional[int] = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase : Any = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) @slow def a ( self ): '''simple docstring''' for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : Dict = MobileViTVaModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def lowercase (): """simple docstring""" _lowerCAmelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def a ( self ): '''simple docstring''' return ( MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ) if is_vision_available() else None ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to( snake_case__ ) _lowerCAmelCase : str = self.default_image_processor _lowerCAmelCase : Any = prepare_img() _lowerCAmelCase : Optional[int] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # forward pass with 
torch.no_grad(): _lowerCAmelCase : Tuple = model(**snake_case__ ) # verify the logits _lowerCAmelCase : Optional[Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) _lowerCAmelCase : Tuple = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(snake_case__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _lowerCAmelCase : Any = model.to(snake_case__ ) _lowerCAmelCase : int = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _lowerCAmelCase : Optional[int] = prepare_img() _lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # forward pass with torch.no_grad(): _lowerCAmelCase : int = model(**snake_case__ ) _lowerCAmelCase : Dict = outputs.logits # verify the logits _lowerCAmelCase : str = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , snake_case__ ) _lowerCAmelCase : Any = torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1E-4 ) ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _lowerCAmelCase : List[Any] = model.to(snake_case__ ) _lowerCAmelCase : str = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _lowerCAmelCase : Tuple = prepare_img() _lowerCAmelCase : List[str] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # forward pass with torch.no_grad(): _lowerCAmelCase : Any = model(**snake_case__ ) _lowerCAmelCase : Optional[Any] = outputs.logits.detach().cpu() _lowerCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(50, 60)] ) _lowerCAmelCase : List[Any] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , snake_case__ ) _lowerCAmelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=snake_case__ ) _lowerCAmelCase : Tuple = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , snake_case__ )
630
1
'''simple docstring''' import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def lowercase (_A ): # picklable for multiprocessing """simple docstring""" return x.sum() def lowercase (_A ): # picklable for multiprocessing """simple docstring""" return i + 1 @dataclass class UpperCamelCase__ : """simple docstring""" __magic_name__ = 42 __magic_name__ = 42 class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = {} _lowerCAmelCase : Union[str, Any] = [] _lowerCAmelCase : str = 1 _lowerCAmelCase : List[Any] = [1, 2] _lowerCAmelCase : List[Any] = {'a': 1, 'b': 2} _lowerCAmelCase : int = {'a': [1, 2], 'b': [3, 4]} _lowerCAmelCase : int = {'a': {'1': 1}, 'b': 2} _lowerCAmelCase : Optional[Any] = {'a': 1, 'b': 2, 'c': 3, 'd': 4} _lowerCAmelCase : Tuple = {} _lowerCAmelCase : int = [] _lowerCAmelCase : Union[str, Any] = 2 _lowerCAmelCase : Optional[int] = [2, 3] _lowerCAmelCase : Optional[int] = {'a': 2, 'b': 3} _lowerCAmelCase : str = {'a': [2, 3], 'b': [4, 5]} _lowerCAmelCase : Union[str, Any] = {'a': {'1': 2}, 'b': 3} _lowerCAmelCase : Optional[int] = {'a': 2, 'b': 3, 'c': 4, 'd': 5} self.assertEqual(map_nested(snake_case__ , snake_case__ ) , snake_case__ ) self.assertEqual(map_nested(snake_case__ , snake_case__ ) , snake_case__ ) self.assertEqual(map_nested(snake_case__ , snake_case__ ) , snake_case__ ) self.assertEqual(map_nested(snake_case__ , snake_case__ ) , snake_case__ ) self.assertEqual(map_nested(snake_case__ , snake_case__ ) , snake_case__ ) self.assertEqual(map_nested(snake_case__ , snake_case__ ) , snake_case__ ) self.assertEqual(map_nested(snake_case__ , snake_case__ ) , snake_case__ ) self.assertEqual(map_nested(snake_case__ , snake_case__ ) , snake_case__ ) _lowerCAmelCase : Union[str, Any] = 2 self.assertEqual(map_nested(snake_case__ , snake_case__ , num_proc=snake_case__ ) , snake_case__ ) self.assertEqual(map_nested(snake_case__ , snake_case__ , num_proc=snake_case__ ) , snake_case__ ) self.assertEqual(map_nested(snake_case__ , snake_case__ , num_proc=snake_case__ ) , snake_case__ ) self.assertEqual(map_nested(snake_case__ , snake_case__ , num_proc=snake_case__ ) , snake_case__ ) self.assertEqual(map_nested(snake_case__ , snake_case__ , num_proc=snake_case__ ) , snake_case__ ) self.assertEqual(map_nested(snake_case__ , snake_case__ , num_proc=snake_case__ ) , snake_case__ ) self.assertEqual(map_nested(snake_case__ , snake_case__ , num_proc=snake_case__ ) , snake_case__ ) self.assertEqual(map_nested(snake_case__ , snake_case__ , num_proc=snake_case__ ) , snake_case__ ) _lowerCAmelCase : Optional[Any] = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )} _lowerCAmelCase : List[str] = {'a': 2, 'b': 0, 'c': 2} _lowerCAmelCase : List[Any] = { 'a': np.eye(2 ).astype(snake_case__ ), 'b': np.zeros(3 ).astype(snake_case__ ), 'c': np.ones(2 ).astype(snake_case__ ), } self.assertEqual(map_nested(snake_case__ , snake_case__ , map_numpy=snake_case__ ) , snake_case__ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(snake_case__ , snake_case__ , map_numpy=snake_case__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) 
self.assertEqual(map_nested(snake_case__ , snake_case__ , map_numpy=snake_case__ , num_proc=snake_case__ ) , snake_case__ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(snake_case__ , snake_case__ , map_numpy=snake_case__ , num_proc=snake_case__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(snake_case__ ): # can't pickle a local lambda map_nested(lambda snake_case__ : x + 1 , snake_case__ , num_proc=snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = {'a': 1, 'b': 2} _lowerCAmelCase : Optional[Any] = {'a': 3, 'b': 4} _lowerCAmelCase : Dict = {'a': 5, 'b': 6} _lowerCAmelCase : Union[str, Any] = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(snake_case__ , snake_case__ , snake_case__ ) ) , snake_case__ ) def a ( self ): '''simple docstring''' class UpperCamelCase__ : """simple docstring""" __magic_name__ = "bar" _lowerCAmelCase : Any = Foo() self.assertEqual(foo.my_attr , 'bar' ) with temporary_assignment(snake_case__ , 'my_attr' , 'BAR' ): self.assertEqual(foo.my_attr , 'BAR' ) self.assertEqual(foo.my_attr , 'bar' ) @pytest.mark.parametrize( 'iterable_length, num_proc, expected_num_proc' , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (1_6, 1_6, 1_6), (1_6, 1_7, 1_6), (1_7, 1_6, 1_6), ] , ) def lowercase (_A , _A , _A ): """simple docstring""" with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch( 'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool: _lowerCAmelCase : Any = {f'{i}': i for i in range(_A )} _lowerCAmelCase : Union[str, Any] = map_nested(lambda _A : x + 1_0 , _A , num_proc=_A , parallel_min_length=1_6 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" @require_tf def a ( self ): '''simple docstring''' import tensorflow as tf from tensorflow.keras import layers _lowerCAmelCase : Any = layers.Dense(2 ) def gen_random_output(): _lowerCAmelCase : List[str] = tf.random.uniform((1, 3) ) return model(snake_case__ ).numpy() with temp_seed(42 , set_tensorflow=snake_case__ ): _lowerCAmelCase : Any = gen_random_output() with temp_seed(42 , set_tensorflow=snake_case__ ): _lowerCAmelCase : List[str] = gen_random_output() _lowerCAmelCase : Dict = gen_random_output() np.testing.assert_equal(snake_case__ , snake_case__ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def a ( self ): '''simple docstring''' import torch def gen_random_output(): _lowerCAmelCase : List[Any] = torch.nn.Linear(3 , 2 ) _lowerCAmelCase : int = torch.rand(1 , 3 ) return model(snake_case__ ).detach().numpy() with temp_seed(42 , set_pytorch=snake_case__ ): _lowerCAmelCase : List[str] = gen_random_output() with temp_seed(42 , set_pytorch=snake_case__ ): _lowerCAmelCase : Optional[int] = gen_random_output() _lowerCAmelCase : Optional[int] = gen_random_output() np.testing.assert_equal(snake_case__ , snake_case__ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def a ( self ): '''simple docstring''' def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): _lowerCAmelCase : Dict = gen_random_output() with temp_seed(42 ): _lowerCAmelCase : int = gen_random_output() 
_lowerCAmelCase : str = gen_random_output() np.testing.assert_equal(snake_case__ , snake_case__ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize('input_data' , [{}] ) def lowercase (_A ): """simple docstring""" _lowerCAmelCase : List[Any] = NestedDataStructure(_A ).data assert output_data == input_data @pytest.mark.parametrize( 'data, expected_output' , [ ({}, []), ([], []), ('foo', ['foo']), (['foo', 'bar'], ['foo', 'bar']), ([['foo', 'bar']], ['foo', 'bar']), ([[['foo'], ['bar']]], ['foo', 'bar']), ([[['foo'], 'bar']], ['foo', 'bar']), ({'a': 1, 'b': 2}, [1, 2]), ({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]), ({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]), ({'a': {'1': 1}, 'b': 2}, [1, 2]), ({'a': {'1': [1]}, 'b': 2}, [1, 2]), ({'a': {'1': [1]}, 'b': [2]}, [1, 2]), ] , ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : List[str] = NestedDataStructure(_A ).flatten() assert output == expected_output def lowercase (): """simple docstring""" _lowerCAmelCase : Any = A(x=1 , y='foobar' ) _lowerCAmelCase : Dict = {'x': 1, 'y': 'foobar'} assert asdict(_A ) == expected_output _lowerCAmelCase : Optional[int] = {'a': {'b': A(x=1_0 , y='foo' )}, 'c': [A(x=2_0 , y='bar' )]} _lowerCAmelCase : Dict = {'a': {'b': {'x': 1_0, 'y': 'foo'}}, 'c': [{'x': 2_0, 'y': 'bar'}]} assert asdict(_A ) == expected_output with pytest.raises(_A ): asdict([1, A(x=1_0 , y='foo' )] ) def lowercase (_A ): """simple docstring""" return text.split() def lowercase (_A ): """simple docstring""" yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def lowercase (): """simple docstring""" with Pool(2 ) as pool: _lowerCAmelCase : List[str] = list(iflatmap_unordered(_A , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 1_0 ) ) assert out.count('hello' ) == 1_0 assert out.count('there' ) == 1_0 assert len(_A ) == 2_0 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: _lowerCAmelCase : List[str] = list(iflatmap_unordered(_A , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 1_0 ) ) assert out.count('hello' ) == 1_0 assert out.count('there' ) == 1_0 assert len(_A ) == 2_0 # check that we get items as fast as possible with Pool(2 ) as pool: _lowerCAmelCase : List[Any] = [] for yield_time, content in iflatmap_unordered( _A , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(_A ) assert out.count('a' ) == 2 assert out.count('b' ) == 2 assert len(_A ) == 4
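A quick illustration of the map_nested behaviour these tests exercise: the function is applied to every leaf of a nested structure while the container shape is preserved (a minimal sketch, assuming datasets is installed):

from datasets.utils.py_utils import map_nested

def add_one(x):  # a module-level function is picklable, so it also works with num_proc > 1
    return x + 1

print(map_nested(add_one, {"a": [1, 2], "b": [3, 4]}))  # {'a': [2, 3], 'b': [4, 5]}
print(map_nested(add_one, {"a": {"1": 1}, "b": 2}))     # {'a': {'1': 2}, 'b': 3}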
630
'''simple docstring'''

import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
630
1
'''simple docstring'''

import numpy as np

from transformers import Pipeline


def softmax(outputs):
    # subtract the row-wise max before exponentiating for numerical stability
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
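Why the max subtraction in softmax above matters: without it, np.exp overflows for large logits. A small self-contained check (NumPy only):

import numpy as np

logits = np.array([[1000.0, 1001.0, 1002.0]])

# naive softmax: np.exp(1000.0) overflows to inf, yielding nan probabilities
naive = np.exp(logits) / np.exp(logits).sum(axis=-1, keepdims=True)

# shifted softmax: subtracting the row max is mathematically a no-op
# but keeps every exponent <= 0, so nothing overflows
shifted = np.exp(logits - logits.max(axis=-1, keepdims=True))
stable = shifted / shifted.sum(axis=-1, keepdims=True)

print(naive)   # [[nan nan nan]] (plus an overflow RuntimeWarning)
print(stable)  # [[0.09003057 0.24472847 0.66524096]]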
630
'''simple docstring'''

def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in the unsorted prefix arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Flip so that the maximum moves to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Flip the first `cur` elements so the maximum lands at position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
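A minimal usage check for the pancake sort above (names as defined in the snippet):

example = [3, 7, 1, -5, 0]
print(pancake_sort(example))  # [-5, 0, 1, 3, 7]
print(pancake_sort([]))       # []  (the while loop never runs for lists of length < 2)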
630
1
'''simple docstring'''

from __future__ import annotations

import math


# for calculating the u term of each Newton forward-difference summand
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
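A worked sketch of the forward-difference idea driving main() above, on evenly spaced points with no interactive input (the sample values here are hypothetical):

# Newton's forward interpolation of f(x) = x**2 at x = 0, 1, 2, 3,
# evaluated at value = 1.5, so u = (1.5 - 0) / (1 - 0) = 1.5.
import math

x = [0, 1, 2, 3]
diffs = [[0.0, 1.0, 4.0, 9.0]]       # f at the sample points
while len(diffs[-1]) > 1:            # build the forward-difference columns
    prev = diffs[-1]
    diffs.append([prev[i + 1] - prev[i] for i in range(len(prev) - 1)])

u = (1.5 - x[0]) / (x[1] - x[0])
summ = diffs[0][0]
term = 1.0
for i in range(1, len(x)):
    term *= u - (i - 1)              # u, u(u-1), u(u-1)(u-2), ...
    summ += term * diffs[i][0] / math.factorial(i)

print(summ)  # 2.25 == 1.5**2, exact because f is a degree-2 polynomial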
630
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : str = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "gptj" __magic_name__ = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , snake_case__=5_0400 , snake_case__=2048 , snake_case__=4096 , snake_case__=28 , snake_case__=16 , snake_case__=64 , snake_case__=None , snake_case__="gelu_new" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1E-5 , snake_case__=0.02 , snake_case__=True , snake_case__=5_0256 , snake_case__=5_0256 , snake_case__=False , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : int = vocab_size _lowerCAmelCase : Optional[int] = n_positions _lowerCAmelCase : Optional[int] = n_embd _lowerCAmelCase : Optional[int] = n_layer _lowerCAmelCase : str = n_head _lowerCAmelCase : Tuple = n_inner _lowerCAmelCase : Tuple = rotary_dim _lowerCAmelCase : Optional[int] = activation_function _lowerCAmelCase : Any = resid_pdrop _lowerCAmelCase : List[str] = embd_pdrop _lowerCAmelCase : int = attn_pdrop _lowerCAmelCase : Any = layer_norm_epsilon _lowerCAmelCase : Optional[int] = initializer_range _lowerCAmelCase : List[str] = use_cache _lowerCAmelCase : Dict = bos_token_id _lowerCAmelCase : Any = eos_token_id super().__init__( bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = "default" , snake_case__ = None , snake_case__ = False , ): '''simple docstring''' super().__init__(snake_case__ , task=snake_case__ , patching_specs=snake_case__ , use_past=snake_case__ ) if not getattr(self._config , 'pad_token_id' , snake_case__ ): # TODO: how to do that better? 
_lowerCAmelCase : Any = 0 @property def a ( self ): '''simple docstring''' _lowerCAmelCase : str = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(snake_case__ , direction='inputs' ) _lowerCAmelCase : int = {0: 'batch', 1: 'past_sequence + sequence'} else: _lowerCAmelCase : int = {0: 'batch', 1: 'sequence'} return common_inputs @property def a ( self ): '''simple docstring''' return self._config.n_layer @property def a ( self ): '''simple docstring''' return self._config.n_head def a ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = super(snake_case__ , self ).generate_dummy_inputs( snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ ) # We need to order the input in the way they appears in the forward() _lowerCAmelCase : Any = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = common_inputs['input_ids'].shape # Not using the same length for past_key_values _lowerCAmelCase : Any = seqlen + 2 _lowerCAmelCase : Optional[int] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _lowerCAmelCase : Tuple = [ (torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers ) ] _lowerCAmelCase : Tuple = common_inputs['attention_mask'] if self.use_past: _lowerCAmelCase : Any = ordered_inputs['attention_mask'].dtype _lowerCAmelCase : Union[str, Any] = torch.cat( [ordered_inputs['attention_mask'], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 ) return ordered_inputs @property def a ( self ): '''simple docstring''' return 13
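A short usage check against the released transformers class this file corresponds to; the constructor defaults printed here match the hyper-parameters in the __init__ signature above:

from transformers import GPTJConfig

config = GPTJConfig()  # defaults correspond to the EleutherAI/gpt-j-6B checkpoint
print(config.n_embd, config.n_layer, config.n_head)  # 4096 28 16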
630
1
'''simple docstring'''

import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
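What the checkpoint regex above matches, in isolation (a standalone check using the same pattern):

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

docstring = "... similar to that of the BERT [bert-base-uncased](https://huggingface.co/bert-base-uncased) architecture."
print(_re_checkpoint.findall(docstring))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]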
630
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
630
1
'''simple docstring''' from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def lowercase (_A ): """simple docstring""" _lowerCAmelCase : int = int(number**0.5 ) return number == sq * sq def lowercase (_A , _A , _A , _A , _A , _A ): """simple docstring""" _lowerCAmelCase : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den _lowerCAmelCase : int = x_den * y_den * z_den _lowerCAmelCase : int = gcd(_A , _A ) top //= hcf bottom //= hcf return top, bottom def lowercase (_A = 3_5 ): """simple docstring""" _lowerCAmelCase : set = set() _lowerCAmelCase : int _lowerCAmelCase : Fraction = Fraction(0 ) _lowerCAmelCase : tuple[int, int] for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 _lowerCAmelCase : List[str] = x_num * y_den + x_den * y_num _lowerCAmelCase : List[Any] = x_den * y_den _lowerCAmelCase : Any = gcd(_A , _A ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _lowerCAmelCase : List[str] = add_three( _A , _A , _A , _A , _A , _A ) unique_s.add(_A ) # n=2 _lowerCAmelCase : str = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) _lowerCAmelCase : List[str] = x_den * x_den * y_den * y_den if is_sq(_A ) and is_sq(_A ): _lowerCAmelCase : Tuple = int(sqrt(_A ) ) _lowerCAmelCase : List[Any] = int(sqrt(_A ) ) _lowerCAmelCase : List[str] = gcd(_A , _A ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _lowerCAmelCase : Optional[Any] = add_three( _A , _A , _A , _A , _A , _A ) unique_s.add(_A ) # n=-1 _lowerCAmelCase : Union[str, Any] = x_num * y_num _lowerCAmelCase : List[Any] = x_den * y_num + x_num * y_den _lowerCAmelCase : Optional[Any] = gcd(_A , _A ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _lowerCAmelCase : List[Any] = add_three( _A , _A , _A , _A , _A , _A ) unique_s.add(_A ) # n=2 _lowerCAmelCase : Optional[int] = x_num * x_num * y_num * y_num _lowerCAmelCase : List[str] = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(_A ) and is_sq(_A ): _lowerCAmelCase : Optional[int] = int(sqrt(_A ) ) _lowerCAmelCase : int = int(sqrt(_A ) ) _lowerCAmelCase : int = gcd(_A , _A ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _lowerCAmelCase : List[Any] = add_three( _A , _A , _A , _A , _A , _A ) unique_s.add(_A ) for num, den in unique_s: total += Fraction(_A , _A ) return total.denominator + total.numerator if __name__ == "__main__": print(F'''{solution() = }''')
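Note that the three defs above all share the name lowercase, so each shadows the previous one; in the upstream source they are is_sq, add_three, and solution. A self-contained check of the fraction-adding step (reimplemented here under that upstream name):

from math import gcd

def add_three(x_num, x_den, y_num, y_den, z_num, z_den):
    # x/x_den + y/y_den + z/z_den over a common denominator, then reduced
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    return top // hcf, bottom // hcf

# top = 1*3*6 + 1*2*6 + 1*2*3 = 36 and bottom = 2*3*6 = 36, so the sum reduces to 1/1
print(add_three(1, 2, 1, 3, 1, 6))  # (1, 1): 1/2 + 1/3 + 1/6 == 1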
630
'''simple docstring'''

import math
from datetime import datetime, timedelta


def gauss_easter(year):
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
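A quick check of the function above against a known date (Western Easter 2024 fell on March 31):

print(gauss_easter(2024))  # 2024-03-31 00:00:00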
630
1
'''simple docstring'''

import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    # exception classes below are restored from the error messages raised by greedy_knapsack
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")


if __name__ == "__main__":
    unittest.main()
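Why 210 is the expected profit in the first test above: the total weight 2+4+6+8+10+12 = 42 fits under max_weight = 100, so the greedy solution takes every item and the optimum is the full profit sum:

profit = [10, 20, 30, 40, 50, 60]
weight = [2, 4, 6, 8, 10, 12]
assert sum(weight) <= 100   # everything fits in the knapsack
assert sum(profit) == 210   # so the maximum profit is the full sum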
630
'''simple docstring'''

import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    # exception classes below are restored from the error messages raised by greedy_knapsack
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")


if __name__ == "__main__":
    unittest.main()
630
1
'''simple docstring''' from typing import Dict, Optional import numpy as np import datasets lowerCAmelCase : Optional[Any] = """ IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation, the mean IoU of the image is calculated by taking the IoU of each class and averaging them. """ lowerCAmelCase : Tuple = """ Args: predictions (`List[ndarray]`): List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. references (`List[ndarray]`): List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. num_labels (`int`): Number of classes (categories). ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): If specified, dictionary mapping old label indices to new label indices. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. Examples: >>> import numpy as np >>> mean_iou = datasets.load_metric(\"mean_iou\") >>> # suppose one has 3 different segmentation maps predicted >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]]) >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]]) >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]]) >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]]) >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]]) >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]]) >>> predicted = [predicted_1, predicted_2, predicted_3] >>> ground_truth = [actual_1, actual_2, actual_3] >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False) >>> print(results) # doctest: +NORMALIZE_WHITESPACE {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])} """ lowerCAmelCase : List[Any] = """\ @software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020, author = {{MMSegmentation Contributors}}, license = {Apache-2.0}, month = {7}, title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}}, url = {https://github.com/open-mmlab/mmsegmentation}, year = {2020} }""" def lowercase (_A , _A , _A , _A , _A = None , _A = False , ): """simple docstring""" if label_map is not None: for old_id, new_id in label_map.items(): _lowerCAmelCase : str = new_id # turn into Numpy arrays _lowerCAmelCase : Dict = np.array(_A ) _lowerCAmelCase : Union[str, Any] = np.array(_A ) if reduce_labels: _lowerCAmelCase : int = 2_5_5 _lowerCAmelCase : Union[str, Any] = label - 1 _lowerCAmelCase : Tuple = 2_5_5 _lowerCAmelCase : Any = label != ignore_index _lowerCAmelCase : Tuple = np.not_equal(_A , _A ) _lowerCAmelCase : Any = pred_label[mask] _lowerCAmelCase : str = np.array(_A )[mask] _lowerCAmelCase : List[Any] = pred_label[pred_label == label] _lowerCAmelCase : int = np.histogram(_A , bins=_A , range=(0, num_labels - 1) )[0] _lowerCAmelCase : int = np.histogram(_A , bins=_A , range=(0, num_labels - 1) )[0] _lowerCAmelCase : Union[str, Any] = np.histogram(_A , bins=_A , range=(0, num_labels - 1) )[0] _lowerCAmelCase : Optional[int] = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def lowercase (_A , _A , _A , _A , _A = None , _A = False , ): """simple docstring""" _lowerCAmelCase : Dict = np.zeros((num_labels,) , dtype=np.floataa ) _lowerCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa ) _lowerCAmelCase : Union[str, Any] = np.zeros((num_labels,) , dtype=np.floataa ) _lowerCAmelCase : Optional[Any] = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(_A , _A ): _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = intersect_and_union( _A , _A , _A , _A , _A , _A ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def lowercase (_A , _A , _A , _A , _A = None , _A = None , _A = False , ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = total_intersect_and_union( _A , _A , _A , _A , _A , _A ) # compute metrics _lowerCAmelCase : str = {} _lowerCAmelCase : Dict = total_area_intersect.sum() / total_area_label.sum() _lowerCAmelCase : Tuple = total_area_intersect / total_area_union _lowerCAmelCase : int = total_area_intersect / total_area_label _lowerCAmelCase : Optional[int] = np.nanmean(_A ) _lowerCAmelCase : Any = np.nanmean(_A ) _lowerCAmelCase : Optional[int] = all_acc _lowerCAmelCase : int = iou _lowerCAmelCase : Any = acc if nan_to_num is not None: _lowerCAmelCase : str = {metric: np.nan_to_num(_A , nan=_A ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def a ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { 'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ), 'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ), 
} ) , reference_urls=[ 'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py' ] , ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = False , ): '''simple docstring''' _lowerCAmelCase : str = mean_iou( results=snake_case__ , gt_seg_maps=snake_case__ , num_labels=snake_case__ , ignore_index=snake_case__ , nan_to_num=snake_case__ , label_map=snake_case__ , reduce_labels=snake_case__ , ) return iou_result
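The core quantity computed per class above is IoU = |pred AND label| / |pred OR label|. A tiny NumPy check for a single binary class:

import numpy as np

pred = np.array([0, 1, 1, 1])
label = np.array([0, 0, 1, 1])
intersect = np.sum((pred == 1) & (label == 1))  # 2
union = np.sum((pred == 1) | (label == 1))      # 3
print(intersect / union)                         # 0.666...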
630
'''simple docstring'''

def method_1(boundary, steps):
    # "extended trapezoidal rule": int(f) ~= h * (f(a)/2 + f(x1) + ... + f(b)/2)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
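A quick accuracy check for the trapezoidal rule above: the exact value of the integral of x**2 over [0, 1] is 1/3, and ten steps land close to it:

exact = 1.0 / 3.0
approx = method_1([0.0, 1.0], 10.0)
print(exact, approx)  # 0.3333... vs roughly 0.335; the error shrinks as steps grow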
630
1
'''simple docstring''' from typing import Any def lowercase (_A , _A , _A , _A , _A , ): """simple docstring""" _validation( _A , _A , _A , _A , _A , ) # Creates data structures and fill initial step _lowerCAmelCase : dict = {} _lowerCAmelCase : dict = {} for state in states_space: _lowerCAmelCase : Union[str, Any] = observations_space[0] _lowerCAmelCase : Dict = ( initial_probabilities[state] * emission_probabilities[state][observation] ) _lowerCAmelCase : int = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(_A ) ): _lowerCAmelCase : Optional[int] = observations_space[o] _lowerCAmelCase : str = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function _lowerCAmelCase : Tuple = '' _lowerCAmelCase : List[Any] = -1 for k_state in states_space: _lowerCAmelCase : Optional[Any] = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: _lowerCAmelCase : Optional[Any] = probability _lowerCAmelCase : str = k_state # Update probabilities and pointers dicts _lowerCAmelCase : List[str] = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) _lowerCAmelCase : List[Any] = arg_max # The final observation _lowerCAmelCase : Dict = observations_space[len(_A ) - 1] # argmax for given final observation _lowerCAmelCase : Any = '' _lowerCAmelCase : str = -1 for k_state in states_space: _lowerCAmelCase : Optional[Any] = probabilities[(k_state, final_observation)] if probability > max_probability: _lowerCAmelCase : Optional[Any] = probability _lowerCAmelCase : Dict = k_state _lowerCAmelCase : List[Any] = arg_max # Process pointers backwards _lowerCAmelCase : Tuple = last_state _lowerCAmelCase : Optional[Any] = [] for o in range(len(_A ) - 1 , -1 , -1 ): result.append(_A ) _lowerCAmelCase : List[str] = pointers[previous, observations_space[o]] result.reverse() return result def lowercase (_A , _A , _A , _A , _A , ): """simple docstring""" _validate_not_empty( _A , _A , _A , _A , _A , ) _validate_lists(_A , _A ) _validate_dicts( _A , _A , _A ) def lowercase (_A , _A , _A , _A , _A , ): """simple docstring""" if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError('There\'s an empty parameter' ) def lowercase (_A , _A ): """simple docstring""" _validate_list(_A , 'observations_space' ) _validate_list(_A , 'states_space' ) def lowercase (_A , _A ): """simple docstring""" if not isinstance(_object , _A ): _lowerCAmelCase : Optional[Any] = f'{var_name} must be a list' raise ValueError(_A ) else: for x in _object: if not isinstance(_A , _A ): _lowerCAmelCase : List[str] = f'{var_name} must be a list of strings' raise ValueError(_A ) def lowercase (_A , _A , _A , ): """simple docstring""" _validate_dict(_A , 'initial_probabilities' , _A ) _validate_nested_dict(_A , 'transition_probabilities' ) _validate_nested_dict(_A , 'emission_probabilities' ) def lowercase (_A , _A ): """simple docstring""" _validate_dict(_object , _A , _A ) for x in _object.values(): _validate_dict(_A , _A , _A , _A ) def lowercase (_A , _A , _A , _A = False ): """simple docstring""" if not isinstance(_object , _A ): _lowerCAmelCase : str = f'{var_name} must be a dict' raise ValueError(_A ) if not all(isinstance(_A , _A ) for x in _object 
): _lowerCAmelCase : Union[str, Any] = f'{var_name} all keys must be strings' raise ValueError(_A ) if not all(isinstance(_A , _A ) for x in _object.values() ): _lowerCAmelCase : int = 'nested dictionary ' if nested else '' _lowerCAmelCase : Any = f'{var_name} {nested_text}all values must be {value_type.__name__}' raise ValueError(_A ) if __name__ == "__main__": from doctest import testmod testmod()
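The entry point of the Viterbi module above is called viterbi upstream; in this dump every def shares the name lowercase, so the later validation helpers shadow it. For reference, the expected argument shapes, using the classic two-state example (a sketch of the inputs, not a call into this file):

# Hypothetical inputs matching the documented parameter order:
# (observations_space, states_space, initial_probabilities,
#  transition_probabilities, emission_probabilities)
observations = ["normal", "cold", "dizzy"]
states = ["healthy", "fever"]
initial = {"healthy": 0.6, "fever": 0.4}
transition = {
    "healthy": {"healthy": 0.7, "fever": 0.3},
    "fever": {"healthy": 0.4, "fever": 0.6},
}
emission = {
    "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
# For these classic numbers the most likely state path is
# ["healthy", "healthy", "fever"].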
630
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
630
1
'''simple docstring'''

def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
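A small check of solution above: the first Fibonacci number with three digits is F(12) = 144, so:

print(solution(3))  # 12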
630
'''simple docstring'''

from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    # a string can be rearranged into a palindrome iff at most one character count is odd
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
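Quick checks of the predicates above: a string qualifies when at most one character count is odd (spaces and case are ignored):

print(can_string_be_rearranged_as_palindrome_counter("Momo"))    # True  (e.g. "ommo")
print(can_string_be_rearranged_as_palindrome_counter("Mother"))  # False (six odd counts)
print(can_string_be_rearranged_as_palindrome("Taco cat"))        # True  ("tacocat")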
630
1
'''simple docstring'''

import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    # up-case, drop non-letters, and separate repeated letters with X's
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J share a cell so the 25 letters fit a 5x5 table
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
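An encode/decode round trip with the cipher above; note that prepare_input inserts X padding, so the decoded text is the padded plaintext, not the raw input (the key/message pair here is the classic Wikipedia example):

key = "playfair example"
message = "Hide the gold in the tree stump"
ct = encode(message, key)
print(ct)               # BMODZBXDNABEKUDMUIXMMOUVIF
print(decode(ct, key))  # HIDETHEGOLDINTHETREXESTUMP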
630
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
630
1
'''simple docstring'''

import socket


def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
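A matching server sketch for the client above (an assumption for illustration; the filename and structure here are hypothetical, the client only requires something listening on the same port that closes the connection when done):

import socket

def serve_file(filename="mytext.txt", port=12312):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)                 # accept a single client
    conn, _addr = server.accept()
    print(conn.recv(1024))           # b'Hello server!'
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()                     # EOF: the client's recv() then returns b''
    server.close()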
630
'''simple docstring'''

import pytest

import datasets


# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # mark unmarked tests as "unit" by default
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
630
1
'''simple docstring'''

def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Find the denominator d <= digit whose decimal expansion of numerator / d
    has the longest recurring cycle (Project Euler problem 26)."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                # a repeated remainder means the cycle has closed
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
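A small check for solution above: among denominators up to 10, 1/7 has the longest recurring cycle (142857, length 6), so:

print(solution(1, 10))  # 7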
630
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig lowerCAmelCase : str = logging.get_logger(__name__) # General docstring lowerCAmelCase : Optional[Any] = """RegNetConfig""" # Base docstring lowerCAmelCase : int = """facebook/regnet-y-040""" lowerCAmelCase : Optional[Any] = [1, 10_88, 7, 7] # Image classification docstring lowerCAmelCase : Any = """facebook/regnet-y-040""" lowerCAmelCase : Optional[Any] = """tabby, tabby cat""" lowerCAmelCase : Tuple = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = 3 , snake_case__ = 1 , snake_case__ = 1 , snake_case__ = "relu" , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb _lowerCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) _lowerCAmelCase : List[Any] = tf.keras.layers.ConvaD( filters=snake_case__ , kernel_size=snake_case__ , strides=snake_case__ , padding='VALID' , groups=snake_case__ , use_bias=snake_case__ , name='convolution' , ) _lowerCAmelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) _lowerCAmelCase : int = ACTaFN[activation] if activation is not None else tf.identity def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = self.convolution(self.padding(snake_case__ ) ) _lowerCAmelCase : Union[str, Any] = self.normalization(snake_case__ ) _lowerCAmelCase : int = self.activation(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : str = config.num_channels _lowerCAmelCase : List[Any] = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = shape_list(snake_case__ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) _lowerCAmelCase : List[Any] = tf.transpose(snake_case__ , perm=(0, 2, 3, 1) ) _lowerCAmelCase : Tuple = self.embedder(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = 2 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = tf.keras.layers.ConvaD( filters=snake_case__ , kernel_size=1 , strides=snake_case__ , use_bias=snake_case__ , name='convolution' ) _lowerCAmelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) def a ( self , snake_case__ , snake_case__ = False ): '''simple docstring''' return self.normalization(self.convolution(snake_case__ ) , training=snake_case__ ) class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='pooler' ) _lowerCAmelCase : str = [ tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='relu' , name='attention.0' ), tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='sigmoid' , name='attention.2' ), ] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = self.pooler(snake_case__ ) for layer_module in self.attention: _lowerCAmelCase : Tuple = layer_module(snake_case__ ) _lowerCAmelCase : Optional[Any] = hidden_state * pooled return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Optional[int] = in_channels != out_channels or stride != 1 _lowerCAmelCase : Union[str, Any] = max(1 , out_channels // config.groups_width ) _lowerCAmelCase : Optional[Any] = ( TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
_lowerCAmelCase : Any = [ TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='layer.1' ), TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='layer.2' ), ] _lowerCAmelCase : List[str] = ACTaFN[config.hidden_act] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = hidden_state for layer_module in self.layers: _lowerCAmelCase : int = layer_module(snake_case__ ) _lowerCAmelCase : int = self.shortcut(snake_case__ ) hidden_state += residual _lowerCAmelCase : Tuple = self.activation(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : List[str] = in_channels != out_channels or stride != 1 _lowerCAmelCase : Union[str, Any] = max(1 , out_channels // config.groups_width ) _lowerCAmelCase : Optional[Any] = ( TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) _lowerCAmelCase : Tuple = [ TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='layer.1' ), TFRegNetSELayer(snake_case__ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ), TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='layer.3' ), ] _lowerCAmelCase : Tuple = ACTaFN[config.hidden_act] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = hidden_state for layer_module in self.layers: _lowerCAmelCase : List[Any] = layer_module(snake_case__ ) _lowerCAmelCase : Tuple = self.shortcut(snake_case__ ) hidden_state += residual _lowerCAmelCase : str = self.activation(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 2 , snake_case__ = 2 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Dict = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer _lowerCAmelCase : Optional[int] = [ # downsampling is done in the first layer with stride of 2 layer(snake_case__ , snake_case__ , snake_case__ , stride=snake_case__ , name='layers.0' ), *[layer(snake_case__ , snake_case__ , snake_case__ , name=F'layers.{i+1}' ) for i in range(depth - 1 )], ] def a ( self , snake_case__ ): '''simple docstring''' for layer_module in self.layers: _lowerCAmelCase : int = layer_module(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : str = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( snake_case__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) ) _lowerCAmelCase : Union[str, Any] = zip(config.hidden_sizes , 
config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(snake_case__ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(snake_case__ , snake_case__ , snake_case__ , depth=snake_case__ , name=F'stages.{i+1}' ) ) def a ( self , snake_case__ , snake_case__ = False , snake_case__ = True ): '''simple docstring''' _lowerCAmelCase : List[Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _lowerCAmelCase : str = hidden_states + (hidden_state,) _lowerCAmelCase : List[str] = stage_module(snake_case__ ) if output_hidden_states: _lowerCAmelCase : Dict = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ ) @keras_serializable class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" __magic_name__ = RegNetConfig def __init__( self , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = config _lowerCAmelCase : Union[str, Any] = TFRegNetEmbeddings(snake_case__ , name='embedder' ) _lowerCAmelCase : Optional[int] = TFRegNetEncoder(snake_case__ , name='encoder' ) _lowerCAmelCase : Dict = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='pooler' ) @unpack_inputs def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = False , ): '''simple docstring''' _lowerCAmelCase : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase : int = self.embedder(snake_case__ , training=snake_case__ ) _lowerCAmelCase : List[str] = self.encoder( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ ) _lowerCAmelCase : List[Any] = encoder_outputs[0] _lowerCAmelCase : Tuple = self.pooler(snake_case__ ) # Change to NCHW output format have uniformity in the modules _lowerCAmelCase : Optional[int] = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) _lowerCAmelCase : Optional[Any] = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: _lowerCAmelCase : Union[str, Any] = tuple([tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case__ , pooler_output=snake_case__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = RegNetConfig __magic_name__ = "regnet" __magic_name__ = "pixel_values" @property def a ( self ): '''simple docstring''' return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} lowerCAmelCase : List[Any] = r""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ lowerCAmelCase : Dict = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE_ , ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , *snake_case__ , **snake_case__ ) _lowerCAmelCase : List[str] = TFRegNetMainLayer(snake_case__ , name='regnet' ) @unpack_inputs @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__=False , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase : str = self.regnet( pixel_values=snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , SCREAMING_SNAKE_CASE_ , ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , *snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[Any] = config.num_labels _lowerCAmelCase : Optional[Any] = TFRegNetMainLayer(snake_case__ , name='regnet' ) # classification head _lowerCAmelCase : Optional[int] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a ( self , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__=False , ): '''simple docstring''' _lowerCAmelCase : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase : Dict = self.regnet( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ ) _lowerCAmelCase : Optional[Any] = outputs.pooler_output if return_dict else outputs[1] _lowerCAmelCase : List[Any] = self.classifier[0](snake_case__ ) _lowerCAmelCase : Tuple = self.classifier[1](snake_case__ ) _lowerCAmelCase : int = None if labels is None else self.hf_compute_loss(labels=snake_case__ , logits=snake_case__ ) if not return_dict: _lowerCAmelCase : str = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
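A minimal usage sketch for the TF RegNet classification model defined above. Everything outside this file is an assumption: the public transformers entry points (TFRegNetForImageClassification, AutoImageProcessor) and the facebook/regnet-y-040 checkpoint are not part of the code here.

# Hedged sketch: end-to-end image classification with the TF RegNet head.
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

image = Image.open("cat.png")                     # hypothetical local image
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits                   # shape (1, num_labels)
predicted = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted])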
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : Optional[Any] = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Dict = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
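The _LazyModule wiring above defers the heavy modeling_luke import until an attribute is first accessed. A minimal sketch of the same pattern (PEP 562 module __getattr__), assuming it lives in a package __init__.py; the mapping below is a trimmed stand-in for the full _import_structure.

# Hedged sketch of the lazy-import pattern behind _LazyModule.
import importlib

_import_structure = {"tokenization_luke": ["LukeTokenizer"]}  # trimmed

def __getattr__(name):
    # Resolve the attribute to its submodule on first access only.
    for submodule, names in _import_structure.items():
        if name in names:
            return getattr(importlib.import_module(f".{submodule}", __package__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")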
'''simple docstring''' from typing import Any def lowercase (_A ): """simple docstring""" if not input_list: return [] _lowerCAmelCase : Optional[int] = [input_list.count(_A ) for value in input_list] _lowerCAmelCase : int = max(_A ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(_A ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
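A worked example for the mode helper above (its def was renamed to `lowercase` by the obfuscation): counts for [1, 2, 2, 3, 3] are [1, 2, 2, 2, 2], the maximum count is 2, and both 2 and 3 reach it, so the sorted set of modes comes back.

# Worked examples, runnable in the same module as the helper above.
assert lowercase([1, 2, 2, 3, 3]) == [2, 3]   # two modes, sorted
assert lowercase([4, 4, 4, 1]) == [4]         # single mode
assert lowercase([]) == []                    # empty input short-circuits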
'''simple docstring''' from math import ceil def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : int = list(range(0 , _A ) ) _lowerCAmelCase : Optional[int] = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check _lowerCAmelCase : List[str] = [] for i in device_map_blocks: if device_map_blocks.count(_A ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(_A ) # Missing blocks _lowerCAmelCase : int = [i for i in blocks if i not in device_map_blocks] _lowerCAmelCase : Optional[int] = [i for i in device_map_blocks if i not in blocks] if len(_A ) != 0: raise ValueError( 'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.' ' These attention blocks were specified more than once: ' + str(_A ) ) if len(_A ) != 0: raise ValueError( 'There are attention blocks for this model that are not specified in the device_map. Add these attention ' 'blocks to a device on the device_map: ' + str(_A ) ) if len(_A ) != 0: raise ValueError( 'The device_map contains more attention blocks than this model has. Remove these from the device_map:' + str(_A ) ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : int = list(range(_A ) ) _lowerCAmelCase : Dict = int(ceil(n_layers / len(_A ) ) ) _lowerCAmelCase : Optional[Any] = [layers[i : i + n_blocks] for i in range(0 , _A , _A )] return dict(zip(_A , _A ) )
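Both helpers above were renamed to `lowercase` by the obfuscation (upstream they are `assert_device_map` and `get_device_map`), so the second definition shadows the first. A worked example of the surviving partitioner: 8 layers over 2 devices gives blocks of ceil(8 / 2) = 4 layers each.

# Worked example for the layer partitioner above (the second `lowercase`).
device_map = lowercase(8, [0, 1])
assert device_map == {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}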
'''simple docstring''' from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : List[str] = { """configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[Any] = [ """MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """MegatronBertForCausalLM""", """MegatronBertForMaskedLM""", """MegatronBertForMultipleChoice""", """MegatronBertForNextSentencePrediction""", """MegatronBertForPreTraining""", """MegatronBertForQuestionAnswering""", """MegatronBertForSequenceClassification""", """MegatronBertForTokenClassification""", """MegatronBertModel""", """MegatronBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_megatron_bert import ( MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, MegatronBertPreTrainedModel, ) else: import sys lowerCAmelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """spiece.model"""} lowerCAmelCase : Optional[int] = { """vocab_file""": { """AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""", } } lowerCAmelCase : Union[str, Any] = { """AI-Sweden/gpt-sw3-126m""": 20_48, """AI-Sweden/gpt-sw3-350m""": 20_48, """AI-Sweden/gpt-sw3-1.6b""": 20_48, """AI-Sweden/gpt-sw3-6.7b""": 20_48, """AI-Sweden/gpt-sw3-20b""": 20_48, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] def __init__( self , snake_case__ , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = None , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs _lowerCAmelCase : List[Any] = kwargs.get('name_or_path' ) if name_or_path is None: logger.warning( 'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,' ' you are testing the model, this can safely be ignored' ) _lowerCAmelCase : Any = 'None' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing _lowerCAmelCase : str = '<|endoftext|>' if eos_token is None else eos_token _lowerCAmelCase : Tuple = '<unk>' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: _lowerCAmelCase : List[str] = unk_token if pad_token is None else pad_token _lowerCAmelCase : Optional[int] = eos_token if bos_token is None else bos_token else: _lowerCAmelCase : Tuple = '<pad>' if pad_token is None else pad_token _lowerCAmelCase : Union[str, Any] = '<s>' if bos_token is None else bos_token super().__init__( do_lower_case=snake_case__ , remove_space=snake_case__ , keep_accents=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , ) _lowerCAmelCase : Union[str, Any] = do_lower_case _lowerCAmelCase : Optional[int] = remove_space _lowerCAmelCase : Any = keep_accents _lowerCAmelCase : Optional[int] = vocab_file _lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(snake_case__ ) # Used for whitespace normalization in input texts # fmt : off _lowerCAmelCase : Optional[Any] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', '„'} # fmt : on # Regular expression to remove 
non-printing characters (e.g. some unicode control chars) in preprocessing _lowerCAmelCase : Optional[Any] = re.compile( F'[{"".join(map(snake_case__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' ) def __getstate__( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.__dict__.copy() _lowerCAmelCase : int = None return state def __setstate__( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): _lowerCAmelCase : int = {} _lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def a ( self ): '''simple docstring''' return len(self.sp_model ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.non_printing_characters_re.sub('' , snake_case__ ) # Normalize whitespaces _lowerCAmelCase : Tuple = ''.join([char if char not in self.whitespaces else ' ' for char in text] ) # NFC Unicode normalization _lowerCAmelCase : Union[str, Any] = unicodedata.normalize('NFC' , snake_case__ ) return text def a ( self , snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = self.preprocess_text(snake_case__ ) return self.sp_model.encode(snake_case__ , out_type=snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' return self.sp_model.PieceToId(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' return self.sp_model.IdToPiece(snake_case__ ) @staticmethod def a ( snake_case__ ): '''simple docstring''' return out_string def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = [] _lowerCAmelCase : Optional[Any] = '' _lowerCAmelCase : Tuple = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(snake_case__ ) + token _lowerCAmelCase : Union[str, Any] = True _lowerCAmelCase : List[Any] = [] else: current_sub_tokens.append(snake_case__ ) _lowerCAmelCase : List[Any] = False out_string += self.sp_model.decode(snake_case__ ) return out_string def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return _lowerCAmelCase : int = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case__ ) elif not os.path.isfile(self.vocab_file ): with open(snake_case__ , 'wb' ) as fi: _lowerCAmelCase : Any = self.sp_model.serialized_model_proto() fi.write(snake_case__ ) return (out_vocab_file,) def a ( self , snake_case__ , snake_case__ = False ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): _lowerCAmelCase : Optional[Any] = self.preprocess_text(snake_case__ ) 
_lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ ) else: _lowerCAmelCase : Tuple = [self.preprocess_text(snake_case__ ) for t in text] _lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ ) if return_tensors is True or return_tensors == "pt": _lowerCAmelCase : int = torch.tensor(snake_case__ ) return token_ids def a ( self , snake_case__ ): '''simple docstring''' return self.sp_model.decode(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()] _lowerCAmelCase : str = ( F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(snake_case__ ) + F'{self.bos_token}Bot:' ) return self.encode(text=snake_case__ )
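A hedged usage sketch for the tokenizer above, assuming the public GPTSw3Tokenizer export and the AI-Sweden/gpt-sw3-126m checkpoint named in PRETRAINED_VOCAB_FILES_MAP (the hub repo may since have moved under AI-Sweden-Models).

# Hedged sketch: round-trip a Swedish sentence through the tokenizer.
from transformers import GPTSw3Tokenizer

tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
ids = tokenizer("Träd är fina", return_tensors="pt")["input_ids"]
print(tokenizer.decode(ids[0]))   # whitespace- and NFC-normalized text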
'''simple docstring''' from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar lowerCAmelCase : str = TypeVar("""T""") lowerCAmelCase : List[Any] = TypeVar("""U""") class UpperCamelCase__ ( Generic[T, U] ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = key _lowerCAmelCase : str = val _lowerCAmelCase : DoubleLinkedListNode[T, U] | None = None _lowerCAmelCase : DoubleLinkedListNode[T, U] | None = None def __repr__( self ): '''simple docstring''' return ( F'Node: key: {self.key}, val: {self.val}, ' F'has next: {bool(self.next )}, has prev: {bool(self.prev )}' ) class UpperCamelCase__ ( Generic[T, U] ): """simple docstring""" def __init__( self ): '''simple docstring''' _lowerCAmelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(snake_case__ , snake_case__ ) _lowerCAmelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(snake_case__ , snake_case__ ) _lowerCAmelCase , _lowerCAmelCase : Dict = self.rear, self.head def __repr__( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = ['DoubleLinkedList'] _lowerCAmelCase : Tuple = self.head while node.next is not None: rep.append(str(snake_case__ ) ) _lowerCAmelCase : Tuple = node.next rep.append(str(self.rear ) ) return ",\n ".join(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Tuple = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None _lowerCAmelCase : str = node _lowerCAmelCase : Optional[int] = previous _lowerCAmelCase : int = node _lowerCAmelCase : Optional[int] = self.rear def a ( self , snake_case__ ): '''simple docstring''' if node.prev is None or node.next is None: return None _lowerCAmelCase : Tuple = node.next _lowerCAmelCase : Tuple = node.prev _lowerCAmelCase : Tuple = None _lowerCAmelCase : int = None return node class UpperCamelCase__ ( Generic[T, U] ): """simple docstring""" __magic_name__ = {} def __init__( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : DoubleLinkedList[T, U] = DoubleLinkedList() _lowerCAmelCase : Any = capacity _lowerCAmelCase : str = 0 _lowerCAmelCase : Optional[int] = 0 _lowerCAmelCase : Dict = 0 _lowerCAmelCase : dict[T, DoubleLinkedListNode[T, U]] = {} def __repr__( self ): '''simple docstring''' return ( F'CacheInfo(hits={self.hits}, misses={self.miss}, ' F'capacity={self.capacity}, current size={self.num_keys})' ) def __contains__( self , snake_case__ ): '''simple docstring''' return key in self.cache def a ( self , snake_case__ ): '''simple docstring''' if key in self.cache: self.hits += 1 _lowerCAmelCase : DoubleLinkedListNode[T, U] = self.cache[key] _lowerCAmelCase : Optional[int] = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(snake_case__ ) return node.val self.miss += 1 return None def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity _lowerCAmelCase : Optional[int] = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(snake_case__ ) is not None ) # node guaranteed to be in list assert node.key is not None del 
self.cache[first_node.key] self.num_keys -= 1 _lowerCAmelCase : Optional[int] = DoubleLinkedListNode(snake_case__ , snake_case__ ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value _lowerCAmelCase : Tuple = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list _lowerCAmelCase : Union[str, Any] = value self.list.add(snake_case__ ) @classmethod def a ( cls , snake_case__ = 128 ): '''simple docstring''' def cache_decorator_inner(snake_case__ ) -> Callable[..., U]: def cache_decorator_wrapper(*snake_case__ ) -> U: if func not in cls.decorator_function_to_instance_map: _lowerCAmelCase : Optional[int] = LRUCache(snake_case__ ) _lowerCAmelCase : List[str] = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: _lowerCAmelCase : Union[str, Any] = func(*snake_case__ ) cls.decorator_function_to_instance_map[func].put(args[0] , snake_case__ ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(snake_case__ , 'cache_info' , snake_case__ ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
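A usage sketch for the LRU cache above. Note the obfuscation renamed every method to `a`, so within one class each later def shadows the previous one; the sketch assumes the conventional names from the original source (`get`, `put`), which is an assumption, not what the class above currently exposes.

# Hedged sketch, assuming the original method names get/put survive.
cache = LRUCache(2)           # capacity 2
cache.put(1, "one")
cache.put(2, "two")
cache.get(1)                  # hit: bumps key 1 to most recently used
cache.put(3, "three")         # evicts key 2, the least recently used
assert cache.get(2) is None   # miss
assert cache.get(1) == "one"  # still cached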
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = (DDPMScheduler,) def a ( self , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**snake_case__ ) return config def a ( self ): '''simple docstring''' for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=snake_case__ ) def a ( self ): '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ ) def a ( self ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=snake_case__ ) def a ( self ): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=snake_case__ ) def a ( self ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=snake_case__ ) def a ( self ): '''simple docstring''' self.check_over_configs(thresholding=snake_case__ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , ) def a ( self ): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=snake_case__ ) def a ( self ): '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.scheduler_classes[0] _lowerCAmelCase : Optional[Any] = self.get_scheduler_config() _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.scheduler_classes[0] _lowerCAmelCase : Optional[Any] = self.get_scheduler_config() _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = len(snake_case__ ) _lowerCAmelCase : str = self.dummy_model() _lowerCAmelCase : str = self.dummy_sample_deter _lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 ) for t in reversed(range(snake_case__ ) ): # 1. predict noise residual _lowerCAmelCase : List[Any] = model(snake_case__ , snake_case__ ) # 2. 
predict previous mean of sample x_t-1 _lowerCAmelCase : Any = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance _lowerCAmelCase : Dict = pred_prev_sample _lowerCAmelCase : Dict = torch.sum(torch.abs(snake_case__ ) ) _lowerCAmelCase : List[str] = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.scheduler_classes[0] _lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='v_prediction' ) _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = len(snake_case__ ) _lowerCAmelCase : Any = self.dummy_model() _lowerCAmelCase : Tuple = self.dummy_sample_deter _lowerCAmelCase : Optional[int] = torch.manual_seed(0 ) for t in reversed(range(snake_case__ ) ): # 1. predict noise residual _lowerCAmelCase : Union[str, Any] = model(snake_case__ , snake_case__ ) # 2. predict previous mean of sample x_t-1 _lowerCAmelCase : Dict = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance _lowerCAmelCase : Tuple = pred_prev_sample _lowerCAmelCase : Any = torch.sum(torch.abs(snake_case__ ) ) _lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.scheduler_classes[0] _lowerCAmelCase : Optional[int] = self.get_scheduler_config() _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=snake_case__ ) _lowerCAmelCase : Union[str, Any] = scheduler.timesteps for i, timestep in enumerate(snake_case__ ): if i == len(snake_case__ ) - 1: _lowerCAmelCase : str = -1 else: _lowerCAmelCase : Optional[Any] = timesteps[i + 1] _lowerCAmelCase : int = scheduler.previous_timestep(snake_case__ ) _lowerCAmelCase : int = prev_t.item() self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.scheduler_classes[0] _lowerCAmelCase : Tuple = self.get_scheduler_config() _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = [100, 87, 50, 51, 0] with self.assertRaises(snake_case__ , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.scheduler_classes[0] _lowerCAmelCase : List[str] = self.get_scheduler_config() _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = [100, 87, 50, 1, 0] _lowerCAmelCase : int = len(snake_case__ ) with self.assertRaises(snake_case__ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' 
): scheduler.set_timesteps(num_inference_steps=snake_case__ , timesteps=snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.scheduler_classes[0] _lowerCAmelCase : int = self.get_scheduler_config() _lowerCAmelCase : Any = scheduler_class(**snake_case__ ) _lowerCAmelCase : Any = [scheduler.config.num_train_timesteps] with self.assertRaises( snake_case__ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ): scheduler.set_timesteps(timesteps=snake_case__ )
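The tests above drive DDPMScheduler through its variance, stepping, and custom-timestep paths. A hedged sketch of the basic denoising loop they exercise, with a random tensor standing in for a trained model's noise prediction:

# Hedged sketch of the DDPM sampling loop (stand-in model output).
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)                    # 50 inference steps
sample = torch.randn(1, 3, 32, 32)             # start from pure noise
for t in scheduler.timesteps:
    noise_pred = torch.randn_like(sample)      # stand-in for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample
print(sample.shape)                            # torch.Size([1, 3, 32, 32])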
'''simple docstring''' import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self ): '''simple docstring''' super().__init__() _lowerCAmelCase : int = nn.Linear(3 , 4 ) _lowerCAmelCase : List[Any] = nn.BatchNormad(4 ) _lowerCAmelCase : List[Any] = nn.Linear(4 , 5 ) def a ( self , snake_case__ ): '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(snake_case__ ) ) ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' return (args[0] + 1,) + args[1:], kwargs class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' return output + 1 class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = ModelForTest() _lowerCAmelCase : Any = ModelHook() add_hook_to_module(snake_case__ , snake_case__ ) self.assertEqual(test_model._hf_hook , snake_case__ ) self.assertTrue(hasattr(snake_case__ , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(snake_case__ ) self.assertFalse(hasattr(snake_case__ , '_hf_hook' ) ) self.assertFalse(hasattr(snake_case__ , '_old_forward' ) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = ModelForTest() _lowerCAmelCase : Dict = ModelHook() add_hook_to_module(snake_case__ , snake_case__ ) add_hook_to_module(snake_case__ , snake_case__ , append=snake_case__ ) self.assertEqual(isinstance(test_model._hf_hook , snake_case__ ) , snake_case__ ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(snake_case__ , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(snake_case__ ) self.assertFalse(hasattr(snake_case__ , '_hf_hook' ) ) self.assertFalse(hasattr(snake_case__ , '_old_forward' ) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = ModelForTest() _lowerCAmelCase : Union[str, Any] = torch.randn(2 , 3 ) _lowerCAmelCase : Tuple = test_model(x + 1 ) _lowerCAmelCase : Dict = test_model(x + 2 ) _lowerCAmelCase : List[str] = PreForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) _lowerCAmelCase : Dict = test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain _lowerCAmelCase : Union[str, Any] = PreForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) _lowerCAmelCase : Union[str, Any] = test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks _lowerCAmelCase : str = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(snake_case__ , snake_case__ ) 
_lowerCAmelCase : Dict = test_model(snake_case__ ) assert torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = ModelForTest() _lowerCAmelCase : List[Any] = torch.randn(2 , 3 ) _lowerCAmelCase : List[Any] = test_model(snake_case__ ) _lowerCAmelCase : int = PostForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) _lowerCAmelCase : Dict = test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain _lowerCAmelCase : Union[str, Any] = PostForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) _lowerCAmelCase : Any = test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks _lowerCAmelCase : Any = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(snake_case__ , snake_case__ ) _lowerCAmelCase : List[Any] = test_model(snake_case__ ) assert torch.allclose(snake_case__ , output + 2 , atol=1E-5 ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = ModelForTest() _lowerCAmelCase : Tuple = torch.randn(2 , 3 ) _lowerCAmelCase : List[Any] = test_model(snake_case__ ) _lowerCAmelCase : List[Any] = PostForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) _lowerCAmelCase : Optional[int] = test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , output + 1 ) ) self.assertTrue(outputa.requires_grad ) _lowerCAmelCase : Dict = True _lowerCAmelCase : List[str] = test_model(snake_case__ ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def a ( self ): '''simple docstring''' _lowerCAmelCase : str = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device _lowerCAmelCase : Optional[int] = torch.randn(2 , 3 ) _lowerCAmelCase : List[Any] = model(snake_case__ ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(snake_case__ , AlignDevicesHook(io_same_device=snake_case__ ) ) _lowerCAmelCase : Tuple = torch.randn(2 , 3 ).to(0 ) _lowerCAmelCase : Union[str, Any] = model(snake_case__ ) self.assertEqual(output.device , torch.device(0 ) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices _lowerCAmelCase : Union[str, Any] = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True} add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**snake_case__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device _lowerCAmelCase : Any = torch.device(hook_kwargs['execution_device'] ) self.assertEqual(model.batchnorm.running_mean.device , snake_case__ ) _lowerCAmelCase : Tuple = torch.randn(2 , 3 ) _lowerCAmelCase : List[str] = model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload _lowerCAmelCase : Optional[int] = { 'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True, 'offload_buffers': True, } add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**snake_case__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) _lowerCAmelCase : Optional[Any] = torch.randn(2 , 3 ) _lowerCAmelCase : List[Any] = model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices _lowerCAmelCase : Union[str, Any] = 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook(snake_case__ , execution_device=snake_case__ , offload=snake_case__ ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device _lowerCAmelCase : int = torch.device(snake_case__ ) self.assertEqual(model.batchnorm.running_mean.device , snake_case__ ) _lowerCAmelCase : Dict = torch.randn(2 , 3 ) _lowerCAmelCase : Tuple = model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(snake_case__ ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook(snake_case__ , execution_device=snake_case__ , offload=snake_case__ , offload_buffers=snake_case__ ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) _lowerCAmelCase : List[Any] = torch.randn(2 , 3 ) _lowerCAmelCase : Any = model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(snake_case__ ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices _lowerCAmelCase : Tuple = 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook( snake_case__ , execution_device=snake_case__ , offload=snake_case__ , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device _lowerCAmelCase : str = torch.device(snake_case__ ) self.assertEqual(model.batchnorm.running_mean.device , snake_case__ ) _lowerCAmelCase : int = torch.randn(2 , 3 ) _lowerCAmelCase : Tuple = model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(snake_case__ ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook( snake_case__ , execution_device=snake_case__ , offload=snake_case__ , weights_map=model.state_dict() , offload_buffers=snake_case__ , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) _lowerCAmelCase : List[str] = torch.randn(2 , 3 ) _lowerCAmelCase : int = model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(snake_case__ ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
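A minimal sketch of the hook mechanism the tests above cover, using the public accelerate.hooks API: a custom pre-forward hook that shifts the input before the wrapped forward runs.

# Hedged sketch: attach, exercise, and remove a custom pre-forward hook.
import torch
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

class AddOneHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs   # shift the first input

linear = torch.nn.Linear(3, 3)
x = torch.randn(2, 3)
add_hook_to_module(linear, AddOneHook())
hooked_out = linear(x)                  # actually computes linear(x + 1)
remove_hook_from_module(linear)
assert torch.allclose(hooked_out, linear(x + 1), atol=1e-5)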
'''simple docstring''' import socket def lowercase (): """simple docstring""" sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) host = socket.gethostname() port = 1_2_3_1_2 sock.connect((host, port) ) sock.send(B'Hello server!' ) with open('Received_file' , 'wb' ) as out_file: print('File opened' ) print('Receiving data...' ) while True: data = sock.recv(1_0_2_4 ) if not data: break out_file.write(data ) print('Successfully received the file' ) sock.close() print('Connection closed' ) if __name__ == "__main__": lowercase()
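The client above expects a peer on port 12312 that streams a file and closes the connection to signal end-of-transfer. A hedged sketch of that server side (the file name is a placeholder):

# Hedged sketch of the matching server: listen, send one file, close.
import socket

def serve_file(path="file_to_send.bin", port=12312):   # path is hypothetical
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _ = server.accept()
    conn.recv(1024)                       # consume the client's greeting
    with open(path, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()                          # EOF tells the client to stop reading
    server.close()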
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=7 , snake_case__=3 , snake_case__=18 , snake_case__=30 , snake_case__=400 , snake_case__=True , snake_case__=None , snake_case__=True , snake_case__=None , ): '''simple docstring''' _lowerCAmelCase : Dict = size if size is not None else {'shortest_edge': 20} _lowerCAmelCase : List[Any] = crop_size if crop_size is not None else {'height': 18, 'width': 18} _lowerCAmelCase : Any = parent _lowerCAmelCase : Optional[int] = batch_size _lowerCAmelCase : int = num_channels _lowerCAmelCase : Optional[Any] = image_size _lowerCAmelCase : Tuple = min_resolution _lowerCAmelCase : Optional[int] = max_resolution _lowerCAmelCase : Any = do_resize _lowerCAmelCase : str = size _lowerCAmelCase : List[Any] = do_center_crop _lowerCAmelCase : List[Any] = crop_size def a ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = MobileNetVaImageProcessor if is_vision_available() else None def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = MobileNetVaImageProcessingTester(self ) @property def a ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , 'do_resize' ) ) self.assertTrue(hasattr(snake_case__ , 'size' ) ) self.assertTrue(hasattr(snake_case__ , 'do_center_crop' ) ) self.assertTrue(hasattr(snake_case__ , 'crop_size' ) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 20} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) _lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input _lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase : Optional[int] = image_processing(snake_case__ , 
return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , np.ndarray ) # Test not batched input _lowerCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase : Any = image_processing(snake_case__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) # Test not batched input _lowerCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase : Optional[int] = image_processing(snake_case__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
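A hedged sketch of the resize-then-center-crop pipeline these tests check. Upstream the class is exported as MobileNetV1ImageProcessor; the MobileNetVaImageProcessor spelling above comes from the obfuscation.

# Hedged sketch: 30x40 input -> shortest edge 20 -> 18x18 center crop.
import numpy as np
from PIL import Image
from transformers import MobileNetV1ImageProcessor

processor = MobileNetV1ImageProcessor(
    size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18}
)
image = Image.fromarray(np.random.randint(0, 255, (30, 40, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
assert pixel_values.shape == (1, 3, 18, 18)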
'''simple docstring''' import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel lowerCAmelCase : Tuple = False lowerCAmelCase : str = True lowerCAmelCase : List[Any] = False if __name__ == "__main__": lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( """--repo_path""", default=None, type=str, required=True, help="""The config json file corresponding to the architecture.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") lowerCAmelCase : Optional[int] = parser.parse_args() lowerCAmelCase : int = { """image_size""": """sample_size""", """num_res_blocks""": """layers_per_block""", """block_channels""": """block_out_channels""", """down_blocks""": """down_block_types""", """up_blocks""": """up_block_types""", """downscale_freq_shift""": """freq_shift""", """resnet_num_groups""": """norm_num_groups""", """resnet_act_fn""": """act_fn""", """resnet_eps""": """norm_eps""", """num_head_channels""": """attention_head_dim""", } lowerCAmelCase : int = { """time_steps""": """time_proj""", """mid""": """mid_block""", """downsample_blocks""": """down_blocks""", """upsample_blocks""": """up_blocks""", } lowerCAmelCase : Optional[Any] = """""" if has_file(args.repo_path, """config.json""") else """unet""" with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader: lowerCAmelCase : int = reader.read() lowerCAmelCase : List[str] = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, """config.json"""): lowerCAmelCase : str = UNetaDModel(**config) else: lowerCAmelCase : Union[str, Any] = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel lowerCAmelCase : Dict = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) lowerCAmelCase : Union[str, Any] = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: lowerCAmelCase : str = config[key] del config[key] lowerCAmelCase : Optional[int] = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]] lowerCAmelCase : Dict = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]] if do_only_weights: lowerCAmelCase : Tuple = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin""")) lowerCAmelCase : str = {} for param_key, param_value in state_dict.items(): if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""): continue lowerCAmelCase : str = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split(""".""")[0] == key: lowerCAmelCase : Dict = param_value lowerCAmelCase : Tuple = True if not has_changed: lowerCAmelCase : Tuple = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
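A hedged usage sketch for the conversion script above; the file name is a placeholder, and UNet2DModel is the upstream spelling of the UNetaDModel import. Note the script saves back into --repo_path (plus the unet subfolder when config.json is nested) even though --dump_path is a required argument.

# Hedged sketch: run the CLI, then load the converted weights back.
#   python convert_legacy_unet.py --repo_path ./old-ddpm-repo --dump_path ./out
from diffusers import UNet2DModel

model = UNet2DModel.from_pretrained("./old-ddpm-repo")   # saved in-place
print(model.config.block_out_channels)                   # a renamed config key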
'''simple docstring''' import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = ["image_processor", "tokenizer"] __magic_name__ = "FlavaImageProcessor" __magic_name__ = ("BertTokenizer", "BertTokenizerFast") def __init__( self , snake_case__=None , snake_case__=None , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , snake_case__ , ) _lowerCAmelCase : Optional[int] = kwargs.pop('feature_extractor' ) _lowerCAmelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(snake_case__ , snake_case__ ) _lowerCAmelCase : str = self.image_processor def __call__( self , snake_case__ = None , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = False , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ): '''simple docstring''' if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: _lowerCAmelCase : List[Any] = self.tokenizer( text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) if images is not None: _lowerCAmelCase : Dict = self.image_processor( snake_case__ , return_image_mask=snake_case__ , return_codebook_pixels=snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) if text is not None and images is not None: encoding.update(snake_case__ ) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def a ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.tokenizer.model_input_names _lowerCAmelCase : Optional[Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def a ( self ): '''simple docstring''' warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' 
, snake_case__ , ) return self.image_processor_class @property def a ( self ): '''simple docstring''' warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case__ , ) return self.image_processor
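A hedged sketch of joint text-and-image preprocessing with the processor above, assuming the public facebook/flava-full checkpoint; the all-zeros image is just a stand-in.

# Hedged sketch: one call tokenizes the text and preprocesses the image.
import numpy as np
from PIL import Image
from transformers import FlavaProcessor

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a photo of a cat"], images=image,
                   padding=True, return_tensors="pt")
print(sorted(inputs.keys()))   # input_ids, attention_mask, pixel_values, ...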
'''simple docstring''' import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None ): '''simple docstring''' super().__init__() _lowerCAmelCase : Union[str, Any] = pad_token_id _lowerCAmelCase : List[Any] = max_length _lowerCAmelCase : Tuple = vocab _lowerCAmelCase : str = merges _lowerCAmelCase : List[str] = BytePairTokenizer(snake_case__ , snake_case__ , sequence_length=snake_case__ ) @classmethod def a ( cls , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = [' '.join(snake_case__ ) for m in tokenizer.bpe_ranks.keys()] _lowerCAmelCase : Any = tokenizer.get_vocab() return cls(snake_case__ , snake_case__ , *snake_case__ , **snake_case__ ) @classmethod def a ( cls , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = GPTaTokenizer.from_pretrained(snake_case__ , *snake_case__ , **snake_case__ ) return cls.from_tokenizer(snake_case__ , *snake_case__ , **snake_case__ ) @classmethod def a ( cls , snake_case__ ): '''simple docstring''' return cls(**snake_case__ ) def a ( self ): '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : str = self.tf_tokenizer(snake_case__ ) _lowerCAmelCase : str = tf.ones_like(snake_case__ ) if self.pad_token_id is not None: # pad the tokens up to max length _lowerCAmelCase : Optional[int] = max_length if max_length is not None else self.max_length if max_length is not None: _lowerCAmelCase , _lowerCAmelCase : str = pad_model_inputs( snake_case__ , max_seq_length=snake_case__ , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
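# --- Usage sketch for the in-graph tokenizer layer above. Assumption: this is transformers'
# TFGPT2Tokenizer; it needs tensorflow, tensorflow-text and keras-nlp installed, plus
# network access for the "gpt2" checkpoint. Because tokenization happens inside the TF
# graph, the layer can be exported as part of a SavedModel.
import tensorflow as tf
from transformers import TFGPT2Tokenizer

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
batch = tf_tokenizer(tf.constant(["hello world"]))
print(batch["input_ids"])  # ragged token ids; set pad_token_id/max_length to get padded tensors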
'''simple docstring''' from __future__ import annotations from collections.abc import Callable lowerCAmelCase : Optional[int] = list[list[float | int]] def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : int = len(_A ) _lowerCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(_A )] _lowerCAmelCase : int _lowerCAmelCase : int _lowerCAmelCase : int _lowerCAmelCase : int _lowerCAmelCase : int _lowerCAmelCase : float for row in range(_A ): for col in range(_A ): _lowerCAmelCase : List[str] = matrix[row][col] _lowerCAmelCase : List[Any] = vector[row][0] _lowerCAmelCase : Optional[Any] = 0 _lowerCAmelCase : List[str] = 0 while row < size and col < size: # pivoting _lowerCAmelCase : str = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_A , _A ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: _lowerCAmelCase , _lowerCAmelCase : Tuple = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , _A ): _lowerCAmelCase : Union[str, Any] = augmented[rowa][col] / augmented[row][col] _lowerCAmelCase : List[str] = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , _A ): for row in range(_A ): _lowerCAmelCase : Union[str, Any] = augmented[row][col] / augmented[col][col] for cola in range(_A , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 1_0 )] for row in range(_A ) ] def lowercase (_A ): """simple docstring""" _lowerCAmelCase : int = len(_A ) _lowerCAmelCase : Matrix = [[0 for _ in range(_A )] for _ in range(_A )] _lowerCAmelCase : Matrix = [[0] for _ in range(_A )] _lowerCAmelCase : Matrix _lowerCAmelCase : int _lowerCAmelCase : int _lowerCAmelCase : int for x_val, y_val in enumerate(_A ): for col in range(_A ): _lowerCAmelCase : Dict = (x_val + 1) ** (size - col - 1) _lowerCAmelCase : Optional[Any] = y_val _lowerCAmelCase : Optional[int] = solve(_A , _A ) def interpolated_func(_A ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(_A ) ) return interpolated_func def lowercase (_A ): """simple docstring""" return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**1_0 ) def lowercase (_A = question_function , _A = 1_0 ): """simple docstring""" _lowerCAmelCase : list[int] = [func(_A ) for x_val in range(1 , order + 1 )] _lowerCAmelCase : list[Callable[[int], int]] = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] _lowerCAmelCase : int = 0 _lowerCAmelCase : Callable[[int], int] _lowerCAmelCase : int for poly in polynomials: _lowerCAmelCase : Union[str, Any] = 1 while func(_A ) == poly(_A ): x_val += 1 ret += poly(_A ) return ret if __name__ == "__main__": print(F'''{solution() = }''')
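# --- Standalone illustration of the forward-elimination / back-substitution idea the
# solver above implements (written separately because every function in this file is
# mangled to `lowercase`). Solves x + y = 3, x - y = 1.
def tiny_gauss_demo() -> list[float]:
    a = [[1.0, 1.0, 3.0], [1.0, -1.0, 1.0]]  # augmented matrix [A | b]
    ratio = a[1][0] / a[0][0]
    a[1] = [a[1][i] - ratio * a[0][i] for i in range(3)]  # eliminate x from the second row
    y = a[1][2] / a[1][1]  # back substitution
    x = (a[0][2] - a[0][1] * y) / a[0][0]
    return [x, y]

assert tiny_gauss_demo() == [2.0, 1.0]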
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : Optional[int] = { """configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""], """feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""], """processing_mctct""": ["""MCTCTProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[str] = [ """MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""", """MCTCTForCTC""", """MCTCTModel""", """MCTCTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys lowerCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
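# --- The _LazyModule above delays the heavy torch imports until an attribute is first
# accessed. A minimal sketch of the same deferral using module-level __getattr__
# (PEP 562); the mapping below is illustrative, not the module's real import table.
import importlib

_LAZY_ATTRS = {"MCTCTConfig": ".configuration_mctct"}

def __getattr__(name: str):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")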
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Any = UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , ) return model @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Dict = UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , ) return model @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : int = AutoencoderKL( sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , ) _lowerCAmelCase : int = UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , ) return vqvae, unet @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase : int = Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) _lowerCAmelCase : List[Any] = DDPMScheduler() _lowerCAmelCase : Union[str, Any] = AudioDiffusionPipeline(vqvae=snake_case__ , unet=self.dummy_unet , mel=snake_case__ , scheduler=snake_case__ ) _lowerCAmelCase : Union[str, Any] = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : List[str] = torch.Generator(device=snake_case__ ).manual_seed(42 ) _lowerCAmelCase : Optional[int] = pipe(generator=snake_case__ , steps=4 ) _lowerCAmelCase : List[Any] = output.audios[0] _lowerCAmelCase : Tuple = output.images[0] _lowerCAmelCase : Tuple = torch.Generator(device=snake_case__ ).manual_seed(42 ) _lowerCAmelCase : str = pipe(generator=snake_case__ , steps=4 , return_dict=snake_case__ ) _lowerCAmelCase : Optional[Any] = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) _lowerCAmelCase : Tuple = np.frombuffer(image.tobytes() , dtype='uint8' )[:10] _lowerCAmelCase : Optional[Any] = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10] _lowerCAmelCase : Dict = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 
0 _lowerCAmelCase : List[str] = Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) _lowerCAmelCase : Any = DDIMScheduler() _lowerCAmelCase : Tuple = self.dummy_vqvae_and_unet _lowerCAmelCase : List[str] = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=snake_case__ , scheduler=snake_case__ ) _lowerCAmelCase : Dict = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) np.random.seed(0 ) _lowerCAmelCase : Optional[int] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) _lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(42 ) _lowerCAmelCase : Union[str, Any] = pipe(raw_audio=snake_case__ , generator=snake_case__ , start_step=5 , steps=10 ) _lowerCAmelCase : List[Any] = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) _lowerCAmelCase : Union[str, Any] = np.frombuffer(image.tobytes() , dtype='uint8' )[:10] _lowerCAmelCase : List[Any] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 _lowerCAmelCase : int = self.dummy_unet_condition _lowerCAmelCase : List[Any] = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=snake_case__ , mel=snake_case__ , scheduler=snake_case__ ) _lowerCAmelCase : Optional[Any] = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) np.random.seed(0 ) _lowerCAmelCase : List[Any] = torch.rand((1, 1, 10) ) _lowerCAmelCase : Tuple = pipe(generator=snake_case__ , encoding=snake_case__ ) _lowerCAmelCase : Optional[Any] = output.images[0] _lowerCAmelCase : Dict = np.frombuffer(image.tobytes() , dtype='uint8' )[:10] _lowerCAmelCase : Optional[int] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = torch_device _lowerCAmelCase : Optional[Any] = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' ) _lowerCAmelCase : str = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : str = torch.Generator(device=snake_case__ ).manual_seed(42 ) _lowerCAmelCase : Dict = pipe(generator=snake_case__ ) _lowerCAmelCase : Any = output.audios[0] _lowerCAmelCase : Dict = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] _lowerCAmelCase : int = np.frombuffer(image.tobytes() , dtype='uint8' )[:10] _lowerCAmelCase : Dict = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
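# --- Usage sketch mirroring the slow integration test above; "teticio/audio-diffusion-ddim-256"
# is the checkpoint the test itself loads, and running this needs diffusers plus network access.
import torch
from diffusers import DiffusionPipeline

audio_pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
result = audio_pipe(generator=torch.Generator().manual_seed(42))
print(result.audios[0].shape)          # generated raw audio
result.images[0].save("mel_spec.png")  # the mel spectrogram the audio was decoded from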
'''simple docstring''' lowerCAmelCase : Optional[int] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)] def lowercase (_A ): """simple docstring""" _lowerCAmelCase : str = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0] number //= 1_0_0_0_0_0 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution lowerCAmelCase : list[bool | None] = [None] * 10_00_00_00 lowerCAmelCase : List[str] = True lowerCAmelCase : Union[str, Any] = False def lowercase (_A ): """simple docstring""" if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore _lowerCAmelCase : Any = chain(next_number(_A ) ) _lowerCAmelCase : List[str] = number_chain while number < 1_0_0_0_0_0_0_0: _lowerCAmelCase : Tuple = number_chain number *= 1_0 return number_chain def lowercase (_A = 1_0_0_0_0_0_0_0 ): """simple docstring""" for i in range(1 , _A ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(_A ) if __name__ == "__main__": import doctest doctest.testmod() print(F'''{solution() = }''')
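# --- Worked example of the square-digit chains this solution counts: every chain
# eventually reaches 1 or 89, e.g. 44 -> 4^2 + 4^2 = 32 -> 13 -> 10 -> 1.
def square_digit_sum_demo(number: int) -> int:
    return sum(int(digit) ** 2 for digit in str(number))

chain_demo = [44]
while chain_demo[-1] not in (1, 89):
    chain_demo.append(square_digit_sum_demo(chain_demo[-1]))
assert chain_demo == [44, 32, 13, 10, 1]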
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py lowerCAmelCase : str = """src/diffusers""" # Matches is_xxx_available() lowerCAmelCase : Optional[Any] = re.compile(r"""is\_([a-z_]*)_available\(\)""") # Matches from xxx import bla lowerCAmelCase : Any = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") lowerCAmelCase : Optional[Any] = """ {0} = None """ lowerCAmelCase : Tuple = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) """ lowerCAmelCase : Optional[Any] = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Union[str, Any] = _re_backend.findall(_A ) if len(_A ) == 0: return None return "_and_".join(_A ) def lowercase (): """simple docstring""" with open(os.path.join(_A , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f: _lowerCAmelCase : Union[str, Any] = f.readlines() # Get to the point we do the actual imports for type checking _lowerCAmelCase : Union[str, Any] = 0 _lowerCAmelCase : Union[str, Any] = {} # Go through the end of the file while line_index < len(_A ): # If the line contains is_backend_available, we grab all objects associated with the `else` block _lowerCAmelCase : Any = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith('else:' ): line_index += 1 line_index += 1 _lowerCAmelCase : List[str] = [] # Until we unindent, add backend objects to the list while line_index < len(_A ) and len(lines[line_index] ) > 1: _lowerCAmelCase : Union[str, Any] = lines[line_index] _lowerCAmelCase : Union[str, Any] = _re_single_line_import.search(_A ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(_A ) > 0: _lowerCAmelCase : int = objects else: line_index += 1 return backend_specific_objects def lowercase (_A , _A ): """simple docstring""" if name.isupper(): return DUMMY_CONSTANT.format(_A ) elif name.islower(): return DUMMY_FUNCTION.format(_A , _A ) else: return DUMMY_CLASS.format(_A , _A ) def lowercase (_A=None ): """simple docstring""" if backend_specific_objects is None: _lowerCAmelCase : List[str] = read_init() # For special correspondence backend to module name as used in the function requires_modulename _lowerCAmelCase : List[str] = {} for backend, objects in backend_specific_objects.items(): _lowerCAmelCase : Union[str, Any] = '[' + ', '.join(f'"{b}"' for b in backend.split('_and_' ) ) + ']' _lowerCAmelCase : Dict = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n' dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(_A , _A ) for o in objects] ) _lowerCAmelCase : List[Any] = dummy_file return dummy_files def lowercase (_A=False ): """simple docstring""" _lowerCAmelCase : List[Any] = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py _lowerCAmelCase : List[str] = {'torch': 'pt'} # Locate actual dummy modules and read their content. 
_lowerCAmelCase : Union[str, Any] = os.path.join(_A , 'utils' ) _lowerCAmelCase : Dict = { backend: os.path.join(_A , f'dummy_{short_names.get(_A , _A )}_objects.py' ) for backend in dummy_files.keys() } _lowerCAmelCase : Optional[Any] = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(_A ): with open(_A , 'r' , encoding='utf-8' , newline='\n' ) as f: _lowerCAmelCase : List[str] = f.read() else: _lowerCAmelCase : Any = '' for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f'Updating diffusers.utils.dummy_{short_names.get(_A , _A )}_objects.py as the main ' '__init__ has new objects.' ) with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(dummy_files[backend] ) else: raise ValueError( 'The main __init__ has objects that are not present in ' f'diffusers.utils.dummy_{short_names.get(_A , _A )}_objects.py. Run `make fix-copies` ' 'to fix this.' ) if __name__ == "__main__": lowerCAmelCase : List[str] = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowerCAmelCase : Dict = parser.parse_args() check_dummies(args.fix_and_overwrite)
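# --- Standalone illustration of what the class template above renders to (abridged; the
# module-level template names are mangled, so the format string is repeated here verbatim).
_DUMMY_CLASS_DEMO = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""
print(_DUMMY_CLASS_DEMO.format("StableDiffusionPipeline", '["torch"]'))
# The checker itself runs from the repo root: python utils/check_dummies.py [--fix_and_overwrite]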
'''simple docstring''' import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(snake_case__ , 'width_multiplier' ) ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=64 , snake_case__=2 , snake_case__=3 , snake_case__="swish" , snake_case__=3 , snake_case__=32 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=True , snake_case__=True , snake_case__=10 , snake_case__=None , snake_case__=0.25 , snake_case__=0.0 , snake_case__=0.0 , ): '''simple docstring''' _lowerCAmelCase : Tuple = parent _lowerCAmelCase : Optional[int] = batch_size _lowerCAmelCase : List[Any] = image_size _lowerCAmelCase : List[Any] = patch_size _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 ) _lowerCAmelCase : Optional[Any] = hidden_act _lowerCAmelCase : List[Any] = conv_kernel_size _lowerCAmelCase : Optional[Any] = output_stride _lowerCAmelCase : List[Any] = classifier_dropout_prob _lowerCAmelCase : str = use_labels _lowerCAmelCase : List[str] = is_training _lowerCAmelCase : Optional[int] = num_labels _lowerCAmelCase : List[str] = initializer_range _lowerCAmelCase : str = scope _lowerCAmelCase : Any = width_multiplier _lowerCAmelCase : Union[str, Any] = ffn_dropout _lowerCAmelCase : Optional[int] = attn_dropout def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase : Optional[Any] = None _lowerCAmelCase : Dict = None if self.use_labels: _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _lowerCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def a ( self ): '''simple docstring''' return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = MobileViTVaModel(config=snake_case__ ) model.to(snake_case__ ) 
model.eval() _lowerCAmelCase : str = model(snake_case__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.num_labels _lowerCAmelCase : List[Any] = MobileViTVaForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.num_labels _lowerCAmelCase : Optional[int] = MobileViTVaForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Dict = model(snake_case__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) _lowerCAmelCase : Any = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = config_and_inputs _lowerCAmelCase : Tuple = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) __magic_name__ = ( { "feature-extraction": MobileViTVaModel, "image-classification": MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def a ( self ): '''simple docstring''' _lowerCAmelCase : int = MobileViTVaModelTester(self ) _lowerCAmelCase : Dict = MobileViTVaConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ ) def a ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' ) def a ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' ) def a ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not output attentions' ) def a ( self ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' ) def a ( self ): '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : str = model_class(snake_case__ ) _lowerCAmelCase : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase : int = [*signature.parameters.keys()] _lowerCAmelCase : Any = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def a ( self ): '''simple docstring''' def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ): _lowerCAmelCase : Dict = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): _lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) _lowerCAmelCase : List[str] = outputs.hidden_states _lowerCAmelCase : List[str] = 5 self.assertEqual(len(snake_case__ ) , snake_case__ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. _lowerCAmelCase : List[Any] = 2 for i in range(len(snake_case__ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : Optional[int] = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase : Any = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) @slow def a ( self ): '''simple docstring''' for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : Dict = MobileViTVaModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def lowercase (): """simple docstring""" _lowerCAmelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def a ( self ): '''simple docstring''' return ( MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ) if is_vision_available() else None ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to( snake_case__ ) _lowerCAmelCase : str = self.default_image_processor _lowerCAmelCase : Any = prepare_img() _lowerCAmelCase : Optional[int] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # forward pass with 
torch.no_grad(): _lowerCAmelCase : Tuple = model(**snake_case__ ) # verify the logits _lowerCAmelCase : Optional[Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) _lowerCAmelCase : Tuple = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(snake_case__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _lowerCAmelCase : Any = model.to(snake_case__ ) _lowerCAmelCase : int = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _lowerCAmelCase : Optional[int] = prepare_img() _lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # forward pass with torch.no_grad(): _lowerCAmelCase : int = model(**snake_case__ ) _lowerCAmelCase : Dict = outputs.logits # verify the logits _lowerCAmelCase : str = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , snake_case__ ) _lowerCAmelCase : Any = torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1E-4 ) ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _lowerCAmelCase : List[Any] = model.to(snake_case__ ) _lowerCAmelCase : str = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _lowerCAmelCase : Tuple = prepare_img() _lowerCAmelCase : List[str] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # forward pass with torch.no_grad(): _lowerCAmelCase : Any = model(**snake_case__ ) _lowerCAmelCase : Optional[Any] = outputs.logits.detach().cpu() _lowerCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(50, 60)] ) _lowerCAmelCase : List[Any] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , snake_case__ ) _lowerCAmelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=snake_case__ ) _lowerCAmelCase : Tuple = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , snake_case__ )
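# --- Inference sketch distilled from the integration tests above. Assumption: the mangled
# MobileViTVa* classes are transformers' MobileViTV2* classes; the checkpoint is the one
# the tests load and fetching it needs network access.
import torch
from PIL import Image
from transformers import MobileViTImageProcessor, MobileViTV2ForImageClassification

mvit_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
mvit_model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
pixel_inputs = mvit_processor(images=Image.new("RGB", (256, 256)), return_tensors="pt")
with torch.no_grad():
    logits = mvit_model(**pixel_inputs).logits
print(logits.shape)  # torch.Size([1, 1000]) for the ImageNet-1k head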
'''simple docstring''' import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor lowerCAmelCase : Any = logging.getLogger(__name__) lowerCAmelCase : int = 50 # max width of layer names lowerCAmelCase : Tuple = 70 # max width of quantizer names def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Dict = parser.add_argument_group('quant_trainer arguments' ) group.add_argument('--wprec' , type=_A , default=8 , help='weight precision' ) group.add_argument('--aprec' , type=_A , default=8 , help='activation precision' ) group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' ) group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' ) group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' ) group.add_argument('--quant-disable-keyword' , type=_A , nargs='+' , help='disable quantizers by keyword' ) group.add_argument('--quant-disable-layer-module' , type=_A , help='disable quantizers by keyword under layer.' ) group.add_argument('--quant-enable-layer-module' , type=_A , help='enable quantizers by keyword under layer' ) group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' ) group.add_argument('--percentile' , default=_A , type=_A , help='percentile for PercentileCalibrator' ) group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' ) group.add_argument('--clip-gelu' , metavar='N' , type=_A , help='clip gelu output maximum value to N' ) group.add_argument( '--recalibrate-weights' , action='store_true' , help=( 'recalibrate weight amaxes by taking the max of the weights.' ' amaxes will be computed with the current quantization granularity (axis).' ) , ) def lowercase (_A ): """simple docstring""" if args.calibrator == "max": _lowerCAmelCase : Dict = 'max' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('Specify --percentile when using percentile calibrator' ) _lowerCAmelCase : List[str] = 'histogram' elif args.calibrator == "mse": _lowerCAmelCase : Any = 'histogram' else: raise ValueError(f'Invalid calibrator {args.calibrator}' ) _lowerCAmelCase : Dict = QuantDescriptor(num_bits=args.aprec , calib_method=_A ) _lowerCAmelCase : Dict = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(_A ) quant_nn.QuantLinear.set_default_quant_desc_weight(_A ) def lowercase (_A , _A , _A=False , _A=False ): """simple docstring""" logger.info('Configuring Model for Quantization' ) logger.info(f'using quantization package {pytorch_quantization.__file__}' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(_A , ['embeddings'] , which='weight' , _disabled=_A ) if args.quant_disable: set_quantizer_by_name(_A , [''] , _disabled=_A ) if args.quant_disable_keyword: set_quantizer_by_name(_A , args.quant_disable_keyword , _disabled=_A ) if args.quant_disable_layer_module: set_quantizer_by_name(_A , [r'layer.\d+.' + args.quant_disable_layer_module] , _disabled=_A ) if args.quant_enable_layer_module: set_quantizer_by_name(_A , [r'layer.\d+.' 
+ args.quant_enable_layer_module] , _disabled=_A ) if args.recalibrate_weights: recalibrate_weights(_A ) if args.fuse_qkv: fuse_qkv(_A , _A ) if args.clip_gelu: clip_gelu(_A , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(_A ) def lowercase (_A ): """simple docstring""" logger.info('Enabling Calibration' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f'{name:80}: {module}' ) def lowercase (_A , _A ): """simple docstring""" logger.info('Loading calibrated amax' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('percentile' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(_A ) def lowercase (_A , _A ): """simple docstring""" def fusea(_A , _A , _A ): for mod in [qq, qk, qv]: if not hasattr(_A , '_amax' ): print(' WARNING: NO AMAX BUFFER' ) return _lowerCAmelCase : List[str] = qq._amax.detach().item() _lowerCAmelCase : Union[str, Any] = qk._amax.detach().item() _lowerCAmelCase : Tuple = qv._amax.detach().item() _lowerCAmelCase : List[Any] = max(_A , _A , _A ) qq._amax.fill_(_A ) qk._amax.fill_(_A ) qv._amax.fill_(_A ) logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' ) for name, mod in model.named_modules(): if name.endswith('.attention.self' ): logger.info(f'FUSE_QKV: {name:{name_width}}' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def lowercase (_A , _A ): """simple docstring""" for name, mod in model.named_modules(): if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ): _lowerCAmelCase : Tuple = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=_A ) _lowerCAmelCase : str = mod._input_quantizer._amax.data.detach().item() logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' ) def lowercase (_A ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(_A , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None: _lowerCAmelCase : List[Any] = mod.weight.shape[0] _lowerCAmelCase : str = mod._weight_quantizer._amax.detach() _lowerCAmelCase : Dict = torch.ones(_A , dtype=amax.dtype , device=amax.device ) * amax print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' ) def lowercase (_A ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(_A , '_weight_quantizer' ): if not hasattr(mod.weight_quantizer , '_amax' ): print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' ) continue # determine which axes to reduce across # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) _lowerCAmelCase : List[str] = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _lowerCAmelCase : Any = set(range(len(mod.weight.size() ) ) ) - axis_set _lowerCAmelCase : str = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_A , keepdims=_A ).detach() logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' ) _lowerCAmelCase : int = amax def lowercase (_A , _A=2_5 , _A=1_8_0 , _A=None ): """simple docstring""" if ignore is None: _lowerCAmelCase : Dict = [] elif not isinstance(_A , _A ): _lowerCAmelCase : Tuple = [ignore] _lowerCAmelCase : Dict = 0 for name, mod in model.named_modules(): if not hasattr(_A , 'weight' ): continue _lowerCAmelCase : Any = max(_A , len(_A ) ) for name, mod in model.named_modules(): _lowerCAmelCase : Optional[Any] = getattr(_A , '_input_quantizer' , _A ) _lowerCAmelCase : Any = getattr(_A , '_weight_quantizer' , _A ) if not hasattr(_A , 'weight' ): continue if type(_A ) in ignore: continue if [True for s in ignore if type(_A ) is str and s in name]: continue _lowerCAmelCase : Optional[int] = f'Act:{input_q.extra_repr()}' _lowerCAmelCase : str = f'Wgt:{weight_q.extra_repr()}' _lowerCAmelCase : Any = f'{name:{name_width}} {act_str} {wgt_str}' if len(_A ) <= line_width: logger.info(_A ) else: logger.info(f'{name:{name_width}} {act_str}' ) logger.info(f'{" ":{name_width}} {wgt_str}' ) def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Dict = 0 for name, mod in model.named_modules(): if isinstance(_A , pytorch_quantization.nn.TensorQuantizer ): print(f'{name:80} {mod}' ) count += 1 print(f'{count} TensorQuantizers found in model' ) def lowercase (_A , _A , _A , _A , _A ): """simple docstring""" _lowerCAmelCase : Any = getattr(_A , _A , _A ) if quantizer_mod is not None: assert hasattr(_A , _A ) setattr(_A , _A , _A ) else: logger.warning(f'{name} has no {quantizer}' ) def lowercase (_A , _A , _A="both" , **_A ): """simple docstring""" _lowerCAmelCase : List[Any] = f'Warning: changing {which} quantizers of {name:{qname_width}}' for k, v in kwargs.items(): s += f' {k}={v}' if which in ["input", "both"]: set_quantizer(_A , _A , '_input_quantizer' , _A , _A ) if which in ["weight", "both"]: set_quantizer(_A , _A , '_weight_quantizer' , _A , _A ) logger.info(_A ) def lowercase (_A , _A , **_A ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(_A , '_input_quantizer' ) or hasattr(_A , '_weight_quantizer' ): for n in names: if re.search(_A , _A ): set_quantizers(_A , _A , **_A ) elif name.endswith('_quantizer' ): for n in names: if re.search(_A , _A ): _lowerCAmelCase : Any = f'Warning: changing {name:{name_width}}' for k, v in kwargs.items(): s += f' {k}={v}' setattr(_A , _A , _A ) logger.info(_A )
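# --- Usage sketch. Assumption: in the original qdqbert example these helpers are named
# add_arguments, set_default_quantizers and configure_model (every def in this file was
# mangled to `lowercase`). Requires the pytorch-quantization package.
import argparse

import quant_trainer  # hypothetical import of this module under its original name

parser = argparse.ArgumentParser()
quant_trainer.add_arguments(parser)               # registers --wprec, --aprec, --calibrator, ...
args = parser.parse_args(["--quant-per-tensor"])
quant_trainer.set_default_quantizers(args)        # installs the QuantDescriptor defaults
# quant_trainer.configure_model(model, args)      # then patch quantizers on a built model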
'''simple docstring''' import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' ) _lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('xlm-roberta-base' ) _lowerCAmelCase : Dict = 'The dog is cute and lives in the garden house' _lowerCAmelCase : List[str] = jnp.array([tokenizer.encode(snake_case__ )] ) _lowerCAmelCase : Optional[int] = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim _lowerCAmelCase : Tuple = jnp.array( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) _lowerCAmelCase : Union[str, Any] = model(snake_case__ )['last_hidden_state'] self.assertEqual(output.shape , snake_case__ ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] , snake_case__ , atol=1E-3 ) )
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCAmelCase : Any = None lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) lowerCAmelCase : Optional[int] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Dict = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : Optional[Any] = { """facebook/nllb-large-en-ro""": 10_24, """facebook/nllb-200-distilled-600M""": 10_24, } # fmt: off lowerCAmelCase : str = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", 
"""shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = NllbTokenizer __magic_name__ = [] __magic_name__ = [] def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Dict = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token _lowerCAmelCase : Any = legacy_behaviour super().__init__( vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , ) _lowerCAmelCase : Optional[Any] = vocab_file _lowerCAmelCase : Tuple = False if not self.vocab_file else True _lowerCAmelCase : List[str] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) _lowerCAmelCase : Dict = { lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _lowerCAmelCase : str = src_lang if src_lang is not None else 'eng_Latn' _lowerCAmelCase : int = self.convert_tokens_to_ids(self._src_lang ) _lowerCAmelCase : int = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def a ( self ): '''simple docstring''' return self._src_lang @src_lang.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : Dict = [self.sep_token_id] _lowerCAmelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _lowerCAmelCase : Union[str, Any] = src_lang _lowerCAmelCase : List[Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ ) _lowerCAmelCase : List[Any] = tgt_lang_id return inputs def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Dict = src_lang _lowerCAmelCase : Tuple = tgt_lang return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ ) def a ( self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def a ( self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : List[Any] = [] _lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : Optional[int] = [self.cur_lang_code] _lowerCAmelCase : int = [self.eos_token_id] _lowerCAmelCase : Dict = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : int = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.convert_tokens_to_ids(snake_case__ ) if self.legacy_behaviour: _lowerCAmelCase : List[str] = [] _lowerCAmelCase : Tuple = [self.eos_token_id, self.cur_lang_code] else: _lowerCAmelCase : int = [self.cur_lang_code] 
_lowerCAmelCase : List[Any] = [self.eos_token_id] _lowerCAmelCase : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCAmelCase : Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCAmelCase : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory.' ) return _lowerCAmelCase : int = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) return (out_vocab_file,)
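# --- Usage sketch for the fast NLLB tokenizer above; the checkpoint and language codes are
# the ones referenced in this file, and loading the tokenizer needs network access.
from transformers import AutoTokenizer

nllb_tok = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
encoded = nllb_tok("Hello world", return_tensors="pt")
# With the default (non-legacy) behaviour the sequence starts with the source language code.
print(nllb_tok.convert_ids_to_tokens(encoded["input_ids"][0]))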
'''simple docstring''' def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Dict = len(_A ) while cur > 1: # Find the maximum number in arr _lowerCAmelCase : int = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi _lowerCAmelCase : Dict = arr[mi::-1] + arr[mi + 1 : len(_A )] # Reverse whole list _lowerCAmelCase : Optional[int] = arr[cur - 1 :: -1] + arr[cur : len(_A )] cur -= 1 return arr if __name__ == "__main__": lowerCAmelCase : Tuple = input("""Enter numbers separated by a comma:\n""").strip() lowerCAmelCase : Tuple = [int(item) for item in user_input.split(""",""")] print(pancake_sort(unsorted))
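# --- Quick check using the call name the __main__ block above already uses: pancake sort
# flips the current maximum to the front and then into its final slot, giving O(n^2)
# comparisons and at most 2n - 3 flips.
assert pancake_sort([3, 1, 2]) == [1, 2, 3]
assert pancake_sort([]) == []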
'''simple docstring''' lowerCAmelCase : Tuple = [ """Audio""", """Array2D""", """Array3D""", """Array4D""", """Array5D""", """ClassLabel""", """Features""", """Sequence""", """Value""", """Image""", """Translation""", """TranslationVariableLanguages""", ] from .audio import Audio from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : str = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "gptj" __magic_name__ = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , snake_case__=5_0400 , snake_case__=2048 , snake_case__=4096 , snake_case__=28 , snake_case__=16 , snake_case__=64 , snake_case__=None , snake_case__="gelu_new" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1E-5 , snake_case__=0.02 , snake_case__=True , snake_case__=5_0256 , snake_case__=5_0256 , snake_case__=False , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : int = vocab_size _lowerCAmelCase : Optional[int] = n_positions _lowerCAmelCase : Optional[int] = n_embd _lowerCAmelCase : Optional[int] = n_layer _lowerCAmelCase : str = n_head _lowerCAmelCase : Tuple = n_inner _lowerCAmelCase : Tuple = rotary_dim _lowerCAmelCase : Optional[int] = activation_function _lowerCAmelCase : Any = resid_pdrop _lowerCAmelCase : List[str] = embd_pdrop _lowerCAmelCase : int = attn_pdrop _lowerCAmelCase : Any = layer_norm_epsilon _lowerCAmelCase : Optional[int] = initializer_range _lowerCAmelCase : List[str] = use_cache _lowerCAmelCase : Dict = bos_token_id _lowerCAmelCase : Any = eos_token_id super().__init__( bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = "default" , snake_case__ = None , snake_case__ = False , ): '''simple docstring''' super().__init__(snake_case__ , task=snake_case__ , patching_specs=snake_case__ , use_past=snake_case__ ) if not getattr(self._config , 'pad_token_id' , snake_case__ ): # TODO: how to do that better? 
_lowerCAmelCase : Any = 0 @property def a ( self ): '''simple docstring''' _lowerCAmelCase : str = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(snake_case__ , direction='inputs' ) _lowerCAmelCase : int = {0: 'batch', 1: 'past_sequence + sequence'} else: _lowerCAmelCase : int = {0: 'batch', 1: 'sequence'} return common_inputs @property def a ( self ): '''simple docstring''' return self._config.n_layer @property def a ( self ): '''simple docstring''' return self._config.n_head def a ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = super(snake_case__ , self ).generate_dummy_inputs( snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ ) # We need to order the input in the way they appears in the forward() _lowerCAmelCase : Any = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = common_inputs['input_ids'].shape # Not using the same length for past_key_values _lowerCAmelCase : Any = seqlen + 2 _lowerCAmelCase : Optional[int] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _lowerCAmelCase : Tuple = [ (torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers ) ] _lowerCAmelCase : Tuple = common_inputs['attention_mask'] if self.use_past: _lowerCAmelCase : Any = ordered_inputs['attention_mask'].dtype _lowerCAmelCase : Union[str, Any] = torch.cat( [ordered_inputs['attention_mask'], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 ) return ordered_inputs @property def a ( self ): '''simple docstring''' return 13
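# --- Usage sketch. Assumption: the mangled class above is transformers' GPTJConfig; note
# the attribute_map, which lets n_embd be read back through the generic hidden_size alias.
from transformers import GPTJConfig

gptj_config = GPTJConfig(n_layer=4, n_head=8, n_embd=512, rotary_dim=32)
print(gptj_config.model_type)   # "gptj"
print(gptj_config.hidden_size)  # 512, aliased to n_embd via attribute_map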
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device lowerCAmelCase : Optional[int] = False class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" pass @nightly @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Union[str, Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) _lowerCAmelCase : List[Any] = torch.manual_seed(0 ) _lowerCAmelCase : List[str] = pipe.dual_guided( prompt='first prompt' , image=snake_case__ , text_to_image_strength=0.75 , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(snake_case__ ) _lowerCAmelCase : Optional[Any] = VersatileDiffusionPipeline.from_pretrained(snake_case__ , torch_dtype=torch.floataa ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Tuple = generator.manual_seed(0 ) _lowerCAmelCase : List[Any] = pipe.dual_guided( prompt='first prompt' , image=snake_case__ , text_to_image_strength=0.75 , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : str = 'cyberpunk 2077' _lowerCAmelCase : Optional[int] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) _lowerCAmelCase : Optional[int] = torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = pipe.dual_guided( prompt=snake_case__ , image=snake_case__ , text_to_image_strength=0.75 , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images _lowerCAmelCase : Union[str, Any] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase : int = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _lowerCAmelCase : Union[str, Any] = 'A painting of a squirrel eating a burger ' _lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = pipe.text_to_image( prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images _lowerCAmelCase : Optional[Any] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase : Dict = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _lowerCAmelCase : int = pipe.image_variation(snake_case__ , 
generator=snake_case__ , output_type='numpy' ).images _lowerCAmelCase : List[Any] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase : Dict = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : Optional[int] = logging.get_logger(__name__) lowerCAmelCase : Any = { """microsoft/unispeech-sat-base-100h-libri-ft""": ( """https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json""" ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "unispeech-sat" def __init__( self , snake_case__=32 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=1E-5 , snake_case__="group" , snake_case__="gelu" , snake_case__=(512, 512, 512, 512, 512, 512, 512) , snake_case__=(5, 2, 2, 2, 2, 2, 2) , snake_case__=(10, 3, 3, 3, 3, 2, 2) , snake_case__=False , snake_case__=128 , snake_case__=16 , snake_case__=False , snake_case__=True , snake_case__=0.05 , snake_case__=10 , snake_case__=2 , snake_case__=0.0 , snake_case__=10 , snake_case__=0 , snake_case__=320 , snake_case__=2 , snake_case__=0.1 , snake_case__=100 , snake_case__=256 , snake_case__=256 , snake_case__=0.1 , snake_case__="mean" , snake_case__=False , snake_case__=False , snake_case__=256 , snake_case__=(512, 512, 512, 512, 1500) , snake_case__=(5, 3, 3, 1, 1) , snake_case__=(1, 2, 3, 1, 1) , snake_case__=512 , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=504 , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ ) _lowerCAmelCase : str = hidden_size _lowerCAmelCase : Union[str, Any] = feat_extract_norm _lowerCAmelCase : Optional[int] = feat_extract_activation _lowerCAmelCase : Any = list(snake_case__ ) _lowerCAmelCase : Any = list(snake_case__ ) _lowerCAmelCase : int = list(snake_case__ ) _lowerCAmelCase : Optional[int] = conv_bias _lowerCAmelCase : str = num_conv_pos_embeddings _lowerCAmelCase : Dict = num_conv_pos_embedding_groups _lowerCAmelCase : Union[str, Any] = len(self.conv_dim ) _lowerCAmelCase : int = num_hidden_layers _lowerCAmelCase : Tuple = intermediate_size _lowerCAmelCase : int = hidden_act _lowerCAmelCase : Tuple = num_attention_heads _lowerCAmelCase : Tuple = hidden_dropout _lowerCAmelCase : Dict = attention_dropout _lowerCAmelCase : Tuple = activation_dropout _lowerCAmelCase : int = feat_proj_dropout _lowerCAmelCase : str = final_dropout _lowerCAmelCase : Optional[Any] = layerdrop _lowerCAmelCase : Optional[Any] = layer_norm_eps _lowerCAmelCase : Optional[Any] = initializer_range _lowerCAmelCase : Tuple = vocab_size _lowerCAmelCase : Optional[int] = num_clusters _lowerCAmelCase : Any = do_stable_layer_norm _lowerCAmelCase : Any = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCAmelCase : Tuple = apply_spec_augment _lowerCAmelCase : Dict = mask_time_prob _lowerCAmelCase : List[Any] = mask_time_length _lowerCAmelCase : str = mask_time_min_masks _lowerCAmelCase : Optional[int] = mask_feature_prob _lowerCAmelCase : Dict = mask_feature_length _lowerCAmelCase : Optional[Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCAmelCase : Any = num_codevectors_per_group _lowerCAmelCase : int = num_codevector_groups _lowerCAmelCase : str = contrastive_logits_temperature _lowerCAmelCase : Optional[Any] = feat_quantizer_dropout _lowerCAmelCase : Tuple = num_negatives _lowerCAmelCase : Dict = codevector_dim _lowerCAmelCase : List[str] = proj_codevector_dim _lowerCAmelCase : Any = diversity_loss_weight # ctc loss _lowerCAmelCase : Dict = ctc_loss_reduction _lowerCAmelCase : Any = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. _lowerCAmelCase : Optional[Any] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowerCAmelCase : Tuple = list(snake_case__ ) _lowerCAmelCase : Optional[Any] = list(snake_case__ ) _lowerCAmelCase : List[str] = list(snake_case__ ) _lowerCAmelCase : Union[str, Any] = xvector_output_dim @property def a ( self ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
'''simple docstring'''

import math
from datetime import datetime, timedelta


def gauss_easter(year):
    """simple docstring"""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''

import unittest

from knapsack import greedy_knapsack as kp


class UpperCamelCase__(unittest.TestCase):
    """simple docstring"""

    def test_sorted(self):
        '''simple docstring'''
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        '''simple docstring'''
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )


if __name__ == "__main__":
    unittest.main()
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = MvpTokenizer __magic_name__ = MvpTokenizerFast __magic_name__ = True __magic_name__ = filter_roberta_detectors def a ( self ): '''simple docstring''' super().setUp() _lowerCAmelCase : List[Any] = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] _lowerCAmelCase : Dict = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) _lowerCAmelCase : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _lowerCAmelCase : str = {'unk_token': '<unk>'} _lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(snake_case__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(snake_case__ ) ) def a ( self , **snake_case__ ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case__ ) def a ( self , **snake_case__ ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def a ( self ): '''simple docstring''' return MvpTokenizer.from_pretrained('RUCAIBox/mvp' ) @cached_property def a ( self ): '''simple docstring''' return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _lowerCAmelCase : Any = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCAmelCase : Tuple = tokenizer(snake_case__ , max_length=len(snake_case__ ) , padding=snake_case__ , return_tensors='pt' ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) _lowerCAmelCase : List[Any] = batch.input_ids.tolist()[0] self.assertListEqual(snake_case__ , snake_case__ ) # Test that special tokens are reset @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCAmelCase : List[Any] = tokenizer(snake_case__ , padding=snake_case__ , return_tensors='pt' ) # check if input_ids are returned and no labels self.assertIn('input_ids' , snake_case__ ) self.assertIn('attention_mask' , snake_case__ ) self.assertNotIn('labels' , snake_case__ ) self.assertNotIn('decoder_attention_mask' , snake_case__ ) @require_torch def a ( self ): '''simple docstring''' 
_lowerCAmelCase : Any = [ 'Summary of the text.', 'Another summary.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCAmelCase : Union[str, Any] = tokenizer(text_target=snake_case__ , max_length=32 , padding='max_length' , return_tensors='pt' ) self.assertEqual(32 , targets['input_ids'].shape[1] ) @require_torch def a ( self ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCAmelCase : List[str] = tokenizer( ['I am a small frog' * 1024, 'I am a small frog'] , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertEqual(batch.input_ids.shape , (2, 1024) ) @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = ['A long paragraph for summarization.'] _lowerCAmelCase : Dict = [ 'Summary of the text.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCAmelCase : Optional[Any] = tokenizer(snake_case__ , text_target=snake_case__ , return_tensors='pt' ) _lowerCAmelCase : Union[str, Any] = inputs['input_ids'] _lowerCAmelCase : List[Any] = inputs['labels'] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) _lowerCAmelCase : List[Any] = 'A, <mask> AllenNLP sentence.' _lowerCAmelCase : List[str] = tokenizer_r.encode_plus(snake_case__ , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ ) _lowerCAmelCase : Union[str, Any] = tokenizer_p.encode_plus(snake_case__ , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) _lowerCAmelCase : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) _lowerCAmelCase : List[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( snake_case__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( snake_case__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
'''simple docstring'''


def method_a(boundary, steps):
    """simple docstring"""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """simple docstring"""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    """simple docstring"""
    y = (x - 0) * (x - 0)
    return y


def main():
    """simple docstring"""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
'''simple docstring''' from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=10 , snake_case__=[10, 20, 30, 40] , snake_case__=[1, 1, 2, 1] , snake_case__=True , snake_case__=True , snake_case__="relu" , snake_case__=3 , snake_case__=None , ): '''simple docstring''' _lowerCAmelCase : Any = parent _lowerCAmelCase : Tuple = batch_size _lowerCAmelCase : Any = image_size _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : List[str] = embeddings_size _lowerCAmelCase : List[Any] = hidden_sizes _lowerCAmelCase : Union[str, Any] = depths _lowerCAmelCase : List[str] = is_training _lowerCAmelCase : Union[str, Any] = use_labels _lowerCAmelCase : List[Any] = hidden_act _lowerCAmelCase : Dict = num_labels _lowerCAmelCase : str = scope _lowerCAmelCase : Optional[Any] = len(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase : Optional[int] = None if self.use_labels: _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) _lowerCAmelCase : int = self.get_config() return config, pixel_values, labels def a ( self ): '''simple docstring''' return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = TFRegNetModel(config=snake_case__ ) _lowerCAmelCase : List[Any] = model(snake_case__ , training=snake_case__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.num_labels _lowerCAmelCase : int = TFRegNetForImageClassification(snake_case__ ) _lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ , training=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = config_and_inputs _lowerCAmelCase : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () 
__magic_name__ = ( {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = TFRegNetModelTester(self ) _lowerCAmelCase : Tuple = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ ) def a ( self ): '''simple docstring''' return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def a ( self ): '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def a ( self ): '''simple docstring''' super().test_keras_fit() @unittest.skip(reason='RegNet does not support input and output embeddings' ) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : List[str] = model_class(snake_case__ ) _lowerCAmelCase : List[str] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase : Union[str, Any] = [*signature.parameters.keys()] _lowerCAmelCase : Tuple = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def a ( self ): '''simple docstring''' def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ): _lowerCAmelCase : List[str] = model_class(snake_case__ ) _lowerCAmelCase : str = model(**self._prepare_for_class(snake_case__ , snake_case__ ) , training=snake_case__ ) _lowerCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowerCAmelCase : Tuple = self.model_tester.num_stages self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : Any = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: _lowerCAmelCase : Any = layer_type _lowerCAmelCase : Dict = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase : List[Any] = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(snake_case__ , snake_case__ , snake_case__ , snake_case__={} ): _lowerCAmelCase : List[Any] = model(snake_case__ , return_dict=snake_case__ , **snake_case__ ) _lowerCAmelCase : Tuple = model(snake_case__ , return_dict=snake_case__ , **snake_case__ ).to_tuple() def recursive_check(snake_case__ , snake_case__ ): if isinstance(snake_case__ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in 
zip(snake_case__ , snake_case__ ): recursive_check(snake_case__ , snake_case__ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(snake_case__ , snake_case__ ) ) , msg=( 'Tuple and dict output are not equal. Difference:' F' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}' ) , ) recursive_check(snake_case__ , snake_case__ ) for model_class in self.all_model_classes: _lowerCAmelCase : Optional[int] = model_class(snake_case__ ) _lowerCAmelCase : str = self._prepare_for_class(snake_case__ , snake_case__ ) _lowerCAmelCase : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ ) _lowerCAmelCase : Tuple = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) _lowerCAmelCase : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ ) _lowerCAmelCase : Union[str, Any] = self._prepare_for_class(snake_case__ , snake_case__ ) _lowerCAmelCase : List[Any] = self._prepare_for_class(snake_case__ , snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ , {'output_hidden_states': True} ) _lowerCAmelCase : List[str] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) _lowerCAmelCase : Any = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ , {'output_hidden_states': True} ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) @slow def a ( self ): '''simple docstring''' for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : List[Any] = TFRegNetModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def lowercase (): """simple docstring""" _lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def a ( self ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _lowerCAmelCase : List[str] = self.default_image_processor _lowerCAmelCase : Tuple = prepare_img() _lowerCAmelCase : Any = image_processor(images=snake_case__ , return_tensors='tf' ) # forward pass _lowerCAmelCase : List[Any] = model(**snake_case__ , training=snake_case__ ) # verify the logits _lowerCAmelCase : str = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) _lowerCAmelCase : Dict = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , snake_case__ , atol=1E-4 )
'''simple docstring'''

import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCamelCase__(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_flax_xlm_roberta_base(self):
        '''simple docstring'''
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
'''simple docstring'''

from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """simple docstring"""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """simple docstring"""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """simple docstring"""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
'''simple docstring''' from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=0.0 , snake_case__ = None , snake_case__ = "geglu" , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = "layer_norm" , snake_case__ = False , ): '''simple docstring''' super().__init__() _lowerCAmelCase : Any = only_cross_attention _lowerCAmelCase : Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero' _lowerCAmelCase : int = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to' F' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: _lowerCAmelCase : Any = AdaLayerNorm(snake_case__ , snake_case__ ) elif self.use_ada_layer_norm_zero: _lowerCAmelCase : str = AdaLayerNormZero(snake_case__ , snake_case__ ) else: _lowerCAmelCase : Tuple = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ ) _lowerCAmelCase : Dict = Attention( query_dim=snake_case__ , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=snake_case__ , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. _lowerCAmelCase : Dict = ( AdaLayerNorm(snake_case__ , snake_case__ ) if self.use_ada_layer_norm else nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ ) ) _lowerCAmelCase : Any = Attention( query_dim=snake_case__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , upcast_attention=snake_case__ , ) # is self-attn if encoder_hidden_states is none else: _lowerCAmelCase : List[Any] = None _lowerCAmelCase : Dict = None # 3. 
Feed-forward _lowerCAmelCase : Tuple = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ ) _lowerCAmelCase : int = FeedForward(snake_case__ , dropout=snake_case__ , activation_fn=snake_case__ , final_dropout=snake_case__ ) # let chunk size default to None _lowerCAmelCase : int = None _lowerCAmelCase : Optional[Any] = 0 def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = chunk_size _lowerCAmelCase : List[str] = dim def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , ): '''simple docstring''' if self.use_ada_layer_norm: _lowerCAmelCase : Optional[Any] = self.norma(snake_case__ , snake_case__ ) elif self.use_ada_layer_norm_zero: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = self.norma( snake_case__ , snake_case__ , snake_case__ , hidden_dtype=hidden_states.dtype ) else: _lowerCAmelCase : Optional[int] = self.norma(snake_case__ ) _lowerCAmelCase : Any = cross_attention_kwargs if cross_attention_kwargs is not None else {} _lowerCAmelCase : Any = self.attna( snake_case__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=snake_case__ , **snake_case__ , ) if self.use_ada_layer_norm_zero: _lowerCAmelCase : int = gate_msa.unsqueeze(1 ) * attn_output _lowerCAmelCase : List[str] = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: _lowerCAmelCase : Optional[int] = ( self.norma(snake_case__ , snake_case__ ) if self.use_ada_layer_norm else self.norma(snake_case__ ) ) _lowerCAmelCase : Optional[int] = self.attna( snake_case__ , encoder_hidden_states=snake_case__ , attention_mask=snake_case__ , **snake_case__ , ) _lowerCAmelCase : List[str] = attn_output + hidden_states # 3. Feed-forward _lowerCAmelCase : str = self.norma(snake_case__ ) if self.use_ada_layer_norm_zero: _lowerCAmelCase : Optional[Any] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.' 
) _lowerCAmelCase : List[Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size _lowerCAmelCase : str = torch.cat( [self.ff(snake_case__ ) for hid_slice in norm_hidden_states.chunk(snake_case__ , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: _lowerCAmelCase : Union[str, Any] = self.ff(snake_case__ ) if self.use_ada_layer_norm_zero: _lowerCAmelCase : Any = gate_mlp.unsqueeze(1 ) * ff_output _lowerCAmelCase : str = ff_output + hidden_states return hidden_states class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = None , snake_case__ = 4 , snake_case__ = 0.0 , snake_case__ = "geglu" , snake_case__ = False , ): '''simple docstring''' super().__init__() _lowerCAmelCase : int = int(dim * mult ) _lowerCAmelCase : Optional[Any] = dim_out if dim_out is not None else dim if activation_fn == "gelu": _lowerCAmelCase : List[str] = GELU(snake_case__ , snake_case__ ) if activation_fn == "gelu-approximate": _lowerCAmelCase : List[str] = GELU(snake_case__ , snake_case__ , approximate='tanh' ) elif activation_fn == "geglu": _lowerCAmelCase : str = GEGLU(snake_case__ , snake_case__ ) elif activation_fn == "geglu-approximate": _lowerCAmelCase : List[Any] = ApproximateGELU(snake_case__ , snake_case__ ) _lowerCAmelCase : Optional[Any] = nn.ModuleList([] ) # project in self.net.append(snake_case__ ) # project dropout self.net.append(nn.Dropout(snake_case__ ) ) # project out self.net.append(nn.Linear(snake_case__ , snake_case__ ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(snake_case__ ) ) def a ( self , snake_case__ ): '''simple docstring''' for module in self.net: _lowerCAmelCase : List[Any] = module(snake_case__ ) return hidden_states class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ = "none" ): '''simple docstring''' super().__init__() _lowerCAmelCase : Dict = nn.Linear(snake_case__ , snake_case__ ) _lowerCAmelCase : str = approximate def a ( self , snake_case__ ): '''simple docstring''' if gate.device.type != "mps": return F.gelu(snake_case__ , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = self.proj(snake_case__ ) _lowerCAmelCase : List[str] = self.gelu(snake_case__ ) return hidden_states class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__() _lowerCAmelCase : Union[str, Any] = nn.Linear(snake_case__ , dim_out * 2 ) def a ( self , snake_case__ ): '''simple docstring''' if gate.device.type != "mps": return F.gelu(snake_case__ ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.proj(snake_case__ ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(snake_case__ ) class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__() _lowerCAmelCase : List[Any] = nn.Linear(snake_case__ , snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = self.proj(snake_case__ ) return x * 
torch.sigmoid(1.702 * x ) class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__() _lowerCAmelCase : List[str] = nn.Embedding(snake_case__ , snake_case__ ) _lowerCAmelCase : Optional[int] = nn.SiLU() _lowerCAmelCase : Dict = nn.Linear(snake_case__ , embedding_dim * 2 ) _lowerCAmelCase : Any = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ ) def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = self.linear(self.silu(self.emb(snake_case__ ) ) ) _lowerCAmelCase , _lowerCAmelCase : List[Any] = torch.chunk(snake_case__ , 2 ) _lowerCAmelCase : List[str] = self.norm(snake_case__ ) * (1 + scale) + shift return x class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__() _lowerCAmelCase : str = CombinedTimestepLabelEmbeddings(snake_case__ , snake_case__ ) _lowerCAmelCase : List[Any] = nn.SiLU() _lowerCAmelCase : Dict = nn.Linear(snake_case__ , 6 * embedding_dim , bias=snake_case__ ) _lowerCAmelCase : int = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ , eps=1E-6 ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.linear(self.silu(self.emb(snake_case__ , snake_case__ , hidden_dtype=snake_case__ ) ) ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = emb.chunk(6 , dim=1 ) _lowerCAmelCase : Optional[int] = self.norm(snake_case__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = 1E-5 ): '''simple docstring''' super().__init__() _lowerCAmelCase : Optional[int] = num_groups _lowerCAmelCase : Any = eps if act_fn is None: _lowerCAmelCase : Dict = None else: _lowerCAmelCase : Optional[int] = get_activation(snake_case__ ) _lowerCAmelCase : List[str] = nn.Linear(snake_case__ , out_dim * 2 ) def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' if self.act: _lowerCAmelCase : Union[str, Any] = self.act(snake_case__ ) _lowerCAmelCase : str = self.linear(snake_case__ ) _lowerCAmelCase : str = emb[:, :, None, None] _lowerCAmelCase , _lowerCAmelCase : List[str] = emb.chunk(2 , dim=1 ) _lowerCAmelCase : List[str] = F.group_norm(snake_case__ , self.num_groups , eps=self.eps ) _lowerCAmelCase : List[str] = x * (1 + scale) + shift return x
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
'''simple docstring'''

import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    """simple docstring"""
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_iterable():
    """simple docstring"""
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    """simple docstring"""
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
'''simple docstring'''

import pytest

import datasets


# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    """simple docstring"""
    # Mark tests without an explicit "integration" or "unit" marker as "unit"
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    """simple docstring"""
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """simple docstring"""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    """simple docstring"""
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    """simple docstring"""
    # don't count test downloads in the download statistics
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    """simple docstring"""
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
'''simple docstring'''

from __future__ import annotations


def solve_maze(maze):
    """simple docstring"""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze, i, j, solutions):
    """simple docstring"""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig lowerCAmelCase : str = logging.get_logger(__name__) # General docstring lowerCAmelCase : Optional[Any] = """RegNetConfig""" # Base docstring lowerCAmelCase : int = """facebook/regnet-y-040""" lowerCAmelCase : Optional[Any] = [1, 10_88, 7, 7] # Image classification docstring lowerCAmelCase : Any = """facebook/regnet-y-040""" lowerCAmelCase : Optional[Any] = """tabby, tabby cat""" lowerCAmelCase : Tuple = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = 3 , snake_case__ = 1 , snake_case__ = 1 , snake_case__ = "relu" , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb _lowerCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) _lowerCAmelCase : List[Any] = tf.keras.layers.ConvaD( filters=snake_case__ , kernel_size=snake_case__ , strides=snake_case__ , padding='VALID' , groups=snake_case__ , use_bias=snake_case__ , name='convolution' , ) _lowerCAmelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) _lowerCAmelCase : int = ACTaFN[activation] if activation is not None else tf.identity def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = self.convolution(self.padding(snake_case__ ) ) _lowerCAmelCase : Union[str, Any] = self.normalization(snake_case__ ) _lowerCAmelCase : int = self.activation(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : str = config.num_channels _lowerCAmelCase : List[Any] = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = shape_list(snake_case__ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) _lowerCAmelCase : List[Any] = tf.transpose(snake_case__ , perm=(0, 2, 3, 1) ) _lowerCAmelCase : Tuple = self.embedder(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = 2 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = tf.keras.layers.ConvaD( filters=snake_case__ , kernel_size=1 , strides=snake_case__ , use_bias=snake_case__ , name='convolution' ) _lowerCAmelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) def a ( self , snake_case__ , snake_case__ = False ): '''simple docstring''' return self.normalization(self.convolution(snake_case__ ) , training=snake_case__ ) class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='pooler' ) _lowerCAmelCase : str = [ tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='relu' , name='attention.0' ), tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='sigmoid' , name='attention.2' ), ] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = self.pooler(snake_case__ ) for layer_module in self.attention: _lowerCAmelCase : Tuple = layer_module(snake_case__ ) _lowerCAmelCase : Optional[Any] = hidden_state * pooled return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Optional[int] = in_channels != out_channels or stride != 1 _lowerCAmelCase : Union[str, Any] = max(1 , out_channels // config.groups_width ) _lowerCAmelCase : Optional[Any] = ( TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
_lowerCAmelCase : Any = [ TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='layer.1' ), TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='layer.2' ), ] _lowerCAmelCase : List[str] = ACTaFN[config.hidden_act] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = hidden_state for layer_module in self.layers: _lowerCAmelCase : int = layer_module(snake_case__ ) _lowerCAmelCase : int = self.shortcut(snake_case__ ) hidden_state += residual _lowerCAmelCase : Tuple = self.activation(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : List[str] = in_channels != out_channels or stride != 1 _lowerCAmelCase : Union[str, Any] = max(1 , out_channels // config.groups_width ) _lowerCAmelCase : Optional[Any] = ( TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) _lowerCAmelCase : Tuple = [ TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='layer.1' ), TFRegNetSELayer(snake_case__ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ), TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='layer.3' ), ] _lowerCAmelCase : Tuple = ACTaFN[config.hidden_act] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = hidden_state for layer_module in self.layers: _lowerCAmelCase : List[Any] = layer_module(snake_case__ ) _lowerCAmelCase : Tuple = self.shortcut(snake_case__ ) hidden_state += residual _lowerCAmelCase : str = self.activation(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 2 , snake_case__ = 2 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Dict = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer _lowerCAmelCase : Optional[int] = [ # downsampling is done in the first layer with stride of 2 layer(snake_case__ , snake_case__ , snake_case__ , stride=snake_case__ , name='layers.0' ), *[layer(snake_case__ , snake_case__ , snake_case__ , name=F'layers.{i+1}' ) for i in range(depth - 1 )], ] def a ( self , snake_case__ ): '''simple docstring''' for layer_module in self.layers: _lowerCAmelCase : int = layer_module(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : str = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( snake_case__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) ) _lowerCAmelCase : Union[str, Any] = zip(config.hidden_sizes , 
config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(snake_case__ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(snake_case__ , snake_case__ , snake_case__ , depth=snake_case__ , name=F'stages.{i+1}' ) ) def a ( self , snake_case__ , snake_case__ = False , snake_case__ = True ): '''simple docstring''' _lowerCAmelCase : List[Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _lowerCAmelCase : str = hidden_states + (hidden_state,) _lowerCAmelCase : List[str] = stage_module(snake_case__ ) if output_hidden_states: _lowerCAmelCase : Dict = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ ) @keras_serializable class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" __magic_name__ = RegNetConfig def __init__( self , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = config _lowerCAmelCase : Union[str, Any] = TFRegNetEmbeddings(snake_case__ , name='embedder' ) _lowerCAmelCase : Optional[int] = TFRegNetEncoder(snake_case__ , name='encoder' ) _lowerCAmelCase : Dict = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='pooler' ) @unpack_inputs def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = False , ): '''simple docstring''' _lowerCAmelCase : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase : int = self.embedder(snake_case__ , training=snake_case__ ) _lowerCAmelCase : List[str] = self.encoder( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ ) _lowerCAmelCase : List[Any] = encoder_outputs[0] _lowerCAmelCase : Tuple = self.pooler(snake_case__ ) # Change to NCHW output format have uniformity in the modules _lowerCAmelCase : Optional[int] = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) _lowerCAmelCase : Optional[Any] = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: _lowerCAmelCase : Union[str, Any] = tuple([tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case__ , pooler_output=snake_case__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = RegNetConfig __magic_name__ = "regnet" __magic_name__ = "pixel_values" @property def a ( self ): '''simple docstring''' return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} lowerCAmelCase : List[Any] = r""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ lowerCAmelCase : Dict = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE_ , ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , *snake_case__ , **snake_case__ ) _lowerCAmelCase : List[str] = TFRegNetMainLayer(snake_case__ , name='regnet' ) @unpack_inputs @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__=False , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase : str = self.regnet( pixel_values=snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , SCREAMING_SNAKE_CASE_ , ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , *snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[Any] = config.num_labels _lowerCAmelCase : Optional[Any] = TFRegNetMainLayer(snake_case__ , name='regnet' ) # classification head _lowerCAmelCase : Optional[int] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a ( self , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__=False , ): '''simple docstring''' _lowerCAmelCase : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase : Dict = self.regnet( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ ) _lowerCAmelCase : Optional[Any] = outputs.pooler_output if return_dict else outputs[1] _lowerCAmelCase : List[Any] = self.classifier[0](snake_case__ ) _lowerCAmelCase : Tuple = self.classifier[1](snake_case__ ) _lowerCAmelCase : int = None if labels is None else self.hf_compute_loss(labels=snake_case__ , logits=snake_case__ ) if not return_dict: _lowerCAmelCase : str = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
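A hedged inference sketch for the classification model above, assuming the public TFRegNetForImageClassification and AutoImageProcessor exports wrap these layers; the image path is hypothetical:

# Sketch only: checkpoint name follows the docstring constants above.
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

image = Image.open("cat.png")  # hypothetical local image file
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits

predicted_class = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_class])  # e.g. "tabby, tabby cat"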
"""Rod-cutting problem solved three ways: naive recursion, top-down DP, bottom-up DP."""


def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion: maximum revenue obtainable from a rod of length ``n``."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) variant."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) variant."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # The best revenue comes from cutting the rod into 6 pieces, each
    # of length 1, resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
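A quick usage sketch with an illustrative price table; the naive recursion enumerates all 2^(n-1) cut patterns, while both DP variants run in O(n^2) time:

prices = [1, 5, 8, 9]  # hypothetical table: prices[i] = price of a piece of length i + 1
print(bottom_up_cut_rod(4, prices))  # 10: cut into two pieces of length 2 (5 + 5)
print(top_down_cut_rod(4, prices))   # 10, same optimum via memoized recursion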
"""Compute the mode(s) of a list: the value(s) that occur most often."""
from typing import Any


def mode(input_list: list) -> list[Any]:
    """Return the sorted modes of ``input_list`` (empty list for empty input)."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets the values whose count equals the maximum, i.e. the modes.
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
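A usage sketch with illustrative inputs, including the multi-mode case:

print(mode([2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2]))           # [2]
print(mode([3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 4, 4, 2, 2, 4, 2]))     # [2, 4]
print(mode(["x", "y", "y", "z"]))                                  # ['y']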
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : Any = """▁""" lowerCAmelCase : List[Any] = { """vocab_file""": """vocab.json""", """spm_file""": """sentencepiece.bpe.model""", """tokenizer_config_file""": """tokenizer_config.json""", } lowerCAmelCase : Dict = { """vocab_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""", }, """spm_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""", }, """tokenizer_config_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""", }, } lowerCAmelCase : Dict = { """facebook/m2m100_418M""": 10_24, } # fmt: off lowerCAmelCase : Union[str, Any] = { """m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""], """wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""] } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = ["input_ids", "attention_mask"] __magic_name__ = [] __magic_name__ = [] def __init__( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<pad>" , snake_case__="<unk>" , snake_case__="m2m100" , snake_case__ = None , snake_case__=8 , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs _lowerCAmelCase : str = language_codes _lowerCAmelCase : Optional[Any] = FAIRSEQ_LANGUAGE_CODES[language_codes] _lowerCAmelCase : int = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code} _lowerCAmelCase : Tuple = kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ 
self.get_lang_token(snake_case__ ) for lang_code in fairseq_language_code if self.get_lang_token(snake_case__ ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=snake_case__ , tgt_lang=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , language_codes=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=snake_case__ , **snake_case__ , ) _lowerCAmelCase : Optional[Any] = vocab_file _lowerCAmelCase : Any = load_json(snake_case__ ) _lowerCAmelCase : Any = {v: k for k, v in self.encoder.items()} _lowerCAmelCase : Any = spm_file _lowerCAmelCase : int = load_spm(snake_case__ , self.sp_model_kwargs ) _lowerCAmelCase : str = len(self.encoder ) _lowerCAmelCase : Union[str, Any] = { self.get_lang_token(snake_case__ ): self.encoder_size + i for i, lang_code in enumerate(snake_case__ ) } _lowerCAmelCase : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case__ )} _lowerCAmelCase : str = {v: k for k, v in self.lang_token_to_id.items()} _lowerCAmelCase : Any = src_lang if src_lang is not None else 'en' _lowerCAmelCase : Union[str, Any] = tgt_lang _lowerCAmelCase : Dict = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) _lowerCAmelCase : List[Any] = num_madeup_words @property def a ( self ): '''simple docstring''' return len(self.encoder ) + len(self.lang_token_to_id ) @property def a ( self ): '''simple docstring''' return self._src_lang @src_lang.setter def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a ( self , snake_case__ ): '''simple docstring''' return self.sp_model.encode(snake_case__ , out_type=snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(snake_case__ , self.encoder[self.unk_token] ) def a ( self , snake_case__ ): '''simple docstring''' if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(snake_case__ , self.unk_token ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = [] _lowerCAmelCase : List[str] = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(snake_case__ ) + token _lowerCAmelCase : Tuple = [] else: current_sub_tokens.append(snake_case__ ) out_string += self.sp_model.decode(snake_case__ ) return out_string.strip() def a ( self , snake_case__ , snake_case__ = None , snake_case__ = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) _lowerCAmelCase : Union[str, Any] = [1] * len(self.prefix_tokens ) _lowerCAmelCase : Optional[int] = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(snake_case__ )) + suffix_ones return prefix_ones + ([0] * len(snake_case__ )) + ([0] * len(snake_case__ )) + suffix_ones def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + 
self.suffix_tokens def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): '''simple docstring''' _lowerCAmelCase : Tuple = self.__dict__.copy() _lowerCAmelCase : Dict = None return state def __setstate__( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): _lowerCAmelCase : List[str] = {} _lowerCAmelCase : Tuple = load_spm(self.spm_file , self.sp_model_kwargs ) def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' _lowerCAmelCase : Tuple = Path(snake_case__ ) if not save_dir.is_dir(): raise OSError(F'{save_directory} should be a directory' ) _lowerCAmelCase : int = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file'] ) _lowerCAmelCase : str = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file'] ) save_json(self.encoder , snake_case__ ) if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , snake_case__ ) elif not os.path.isfile(self.spm_file ): with open(snake_case__ , 'wb' ) as fi: _lowerCAmelCase : str = self.sp_model.serialized_model_proto() fi.write(snake_case__ ) return (str(snake_case__ ), str(snake_case__ )) def a ( self , snake_case__ , snake_case__ = "en" , snake_case__ = None , snake_case__ = "ro" , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Any = src_lang _lowerCAmelCase : Tuple = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ ) def a ( self , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _lowerCAmelCase : Union[str, Any] = src_lang _lowerCAmelCase : List[str] = self(snake_case__ , add_special_tokens=snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[int] = self.get_lang_id(snake_case__ ) _lowerCAmelCase : Union[str, Any] = tgt_lang_id return inputs def a ( self ): '''simple docstring''' self.set_src_lang_special_tokens(self.src_lang ) def a ( self ): '''simple docstring''' self.set_tgt_lang_special_tokens(self.tgt_lang ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.get_lang_token(snake_case__ ) _lowerCAmelCase : List[str] = self.lang_token_to_id[lang_token] _lowerCAmelCase : str = [self.cur_lang_id] _lowerCAmelCase : Tuple = [self.eos_token_id] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.get_lang_token(snake_case__ ) _lowerCAmelCase : Optional[int] = self.lang_token_to_id[lang_token] _lowerCAmelCase : str = [self.cur_lang_id] _lowerCAmelCase : Dict = [self.eos_token_id] def a ( self , snake_case__ ): '''simple docstring''' return self.lang_code_to_token[lang] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.get_lang_token(snake_case__ ) return self.lang_token_to_id[lang_token] def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : str = sentencepiece.SentencePieceProcessor(**_A ) spm.Load(str(_A ) ) return spm def lowercase (_A ): """simple docstring""" with open(_A , 'r' ) as f: return 
json.load(_A ) def lowercase (_A , _A ): """simple docstring""" with open(_A , 'w' ) as f: json.dump(_A , _A , indent=2 )
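A hedged translation sketch pairing the tokenizer above with its seq2seq model counterpart; the sentence and language pair are illustrative:

# Sketch only: assumes M2M100ForConditionalGeneration is available alongside
# the tokenizer defined above.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

inputs = tokenizer("La vie est comme une boîte de chocolat.", return_tensors="pt")
# Forcing the BOS token steers generation toward the target language.
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))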
'''simple docstring''' from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 6_5_0, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 6_0_0, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 6_0_0, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ] ) class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' if self.framework == "pytorch": subprocess.run( F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=snake_case__ , ) assert hasattr(self , 'env' ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[str] = F'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}' # distributed data settings _lowerCAmelCase : str = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=snake_case__ , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version='py36' , ) def a ( self , snake_case__ ): '''simple docstring''' TrainingJobAnalytics(snake_case__ ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' ) @parameterized.expand([(2,)] ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = self.create_estimator(snake_case__ ) # run training estimator.fit() # result dataframe _lowerCAmelCase : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis _lowerCAmelCase : int = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] ) _lowerCAmelCase : Any = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping _lowerCAmelCase : Dict = ( Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy ) assert all(t <= self.results['eval_loss'] for t in eval_loss ) # dump tests result into json file to share in PR with 
open(F'{estimator.latest_training_job.name}.json' , 'w' ) as outfile: json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , snake_case__ )
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """spiece.model"""} lowerCAmelCase : Optional[int] = { """vocab_file""": { """AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""", } } lowerCAmelCase : Union[str, Any] = { """AI-Sweden/gpt-sw3-126m""": 20_48, """AI-Sweden/gpt-sw3-350m""": 20_48, """AI-Sweden/gpt-sw3-1.6b""": 20_48, """AI-Sweden/gpt-sw3-6.7b""": 20_48, """AI-Sweden/gpt-sw3-20b""": 20_48, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] def __init__( self , snake_case__ , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = None , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs _lowerCAmelCase : List[Any] = kwargs.get('name_or_path' ) if name_or_path is None: logger.warning( 'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,' ' you are testing the model, this can safely be ignored' ) _lowerCAmelCase : Any = 'None' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing _lowerCAmelCase : str = '<|endoftext|>' if eos_token is None else eos_token _lowerCAmelCase : Tuple = '<unk>' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: _lowerCAmelCase : List[str] = unk_token if pad_token is None else pad_token _lowerCAmelCase : Optional[int] = eos_token if bos_token is None else bos_token else: _lowerCAmelCase : Tuple = '<pad>' if pad_token is None else pad_token _lowerCAmelCase : Union[str, Any] = '<s>' if bos_token is None else bos_token super().__init__( do_lower_case=snake_case__ , remove_space=snake_case__ , keep_accents=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , ) _lowerCAmelCase : Union[str, Any] = do_lower_case _lowerCAmelCase : Optional[int] = remove_space _lowerCAmelCase : Any = keep_accents _lowerCAmelCase : Optional[int] = vocab_file _lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(snake_case__ ) # Used for whitespace normalization in input texts # fmt : off _lowerCAmelCase : Optional[Any] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', '„'} # fmt : on # Regular expression to remove 
non-printing characters (e.g. some unicode control chars) in preprocessing _lowerCAmelCase : Optional[Any] = re.compile( F'[{"".join(map(snake_case__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' ) def __getstate__( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.__dict__.copy() _lowerCAmelCase : int = None return state def __setstate__( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): _lowerCAmelCase : int = {} _lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def a ( self ): '''simple docstring''' return len(self.sp_model ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.non_printing_characters_re.sub('' , snake_case__ ) # Normalize whitespaces _lowerCAmelCase : Tuple = ''.join([char if char not in self.whitespaces else ' ' for char in text] ) # NFC Unicode normalization _lowerCAmelCase : Union[str, Any] = unicodedata.normalize('NFC' , snake_case__ ) return text def a ( self , snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = self.preprocess_text(snake_case__ ) return self.sp_model.encode(snake_case__ , out_type=snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' return self.sp_model.PieceToId(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' return self.sp_model.IdToPiece(snake_case__ ) @staticmethod def a ( snake_case__ ): '''simple docstring''' return out_string def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = [] _lowerCAmelCase : Optional[Any] = '' _lowerCAmelCase : Tuple = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(snake_case__ ) + token _lowerCAmelCase : Union[str, Any] = True _lowerCAmelCase : List[Any] = [] else: current_sub_tokens.append(snake_case__ ) _lowerCAmelCase : List[Any] = False out_string += self.sp_model.decode(snake_case__ ) return out_string def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return _lowerCAmelCase : int = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case__ ) elif not os.path.isfile(self.vocab_file ): with open(snake_case__ , 'wb' ) as fi: _lowerCAmelCase : Any = self.sp_model.serialized_model_proto() fi.write(snake_case__ ) return (out_vocab_file,) def a ( self , snake_case__ , snake_case__ = False ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): _lowerCAmelCase : Optional[Any] = self.preprocess_text(snake_case__ ) 
_lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ ) else: _lowerCAmelCase : Tuple = [self.preprocess_text(snake_case__ ) for t in text] _lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ ) if return_tensors is True or return_tensors == "pt": _lowerCAmelCase : int = torch.tensor(snake_case__ ) return token_ids def a ( self , snake_case__ ): '''simple docstring''' return self.sp_model.decode(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()] _lowerCAmelCase : str = ( F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(snake_case__ ) + F'{self.bos_token}Bot:' ) return self.encode(text=snake_case__ )
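A hedged usage sketch for the tokenizer above, reusing a checkpoint name from its vocab map; hub availability of that checkpoint is assumed:

# Sketch only: assumes the GPTSw3Tokenizer class above is exported by transformers
# and that the sentencepiece model can be downloaded from the hub.
from transformers import GPTSw3Tokenizer

tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
ids = tokenizer("Svenska är kul!")["input_ids"]
print(ids)
print(tokenizer.decode(ids))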
'''simple docstring''' import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=True , snake_case__=99 , snake_case__=64 , snake_case__=5 , snake_case__=4 , snake_case__=64 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__=None , ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = parent _lowerCAmelCase : int = batch_size _lowerCAmelCase : Tuple = seq_length _lowerCAmelCase : int = is_training _lowerCAmelCase : str = use_input_mask _lowerCAmelCase : Dict = use_token_type_ids _lowerCAmelCase : Union[str, Any] = use_labels _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : List[str] = hidden_size _lowerCAmelCase : Any = num_hidden_layers _lowerCAmelCase : List[Any] = num_attention_heads _lowerCAmelCase : List[Any] = intermediate_size _lowerCAmelCase : Union[str, Any] = hidden_act _lowerCAmelCase : Union[str, Any] = hidden_dropout_prob _lowerCAmelCase : List[Any] = attention_probs_dropout_prob _lowerCAmelCase : Optional[Any] = max_position_embeddings _lowerCAmelCase : Any = type_vocab_size _lowerCAmelCase : str = type_sequence_label_size _lowerCAmelCase : Tuple = initializer_range _lowerCAmelCase : Optional[int] = num_labels _lowerCAmelCase : int = num_choices _lowerCAmelCase : List[Any] = scope def a ( self ): '''simple docstring''' return MPNetConfig.from_pretrained('microsoft/mpnet-base' ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase : Optional[Any] = None if self.use_input_mask: _lowerCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase : int = None _lowerCAmelCase : Optional[Any] = None _lowerCAmelCase : List[Any] = None if self.use_labels: _lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def a ( self ): '''simple docstring''' return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): 
'''simple docstring''' _lowerCAmelCase : Optional[Any] = MPNetModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : List[str] = model(snake_case__ , snake_case__ ) _lowerCAmelCase : Optional[Any] = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = MPNetForQuestionAnswering(config=snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Optional[int] = model( snake_case__ , attention_mask=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.num_labels _lowerCAmelCase : Optional[Any] = MPNetForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Optional[int] = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = self.num_choices _lowerCAmelCase : Union[str, Any] = MPNetForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase : Optional[Any] = model( snake_case__ , attention_mask=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.num_labels _lowerCAmelCase : str = MPNetForTokenClassification(config=snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : List[str] = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.prepare_config_and_inputs() ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Tuple = config_and_inputs _lowerCAmelCase : Any = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) __magic_name__ = ( { "feature-extraction": MPNetModel, "fill-mask": MPNetForMaskedLM, "question-answering": MPNetForQuestionAnswering, "text-classification": MPNetForSequenceClassification, 
"token-classification": MPNetForTokenClassification, "zero-shot": MPNetForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = True def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = MPNetModelTester(self ) _lowerCAmelCase : List[str] = ConfigTester(self , config_class=snake_case__ , hidden_size=37 ) def a ( self ): '''simple docstring''' self.config_tester.run_common_tests() def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*snake_case__ ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = MPNetModel.from_pretrained('microsoft/mpnet-base' ) _lowerCAmelCase : Dict = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _lowerCAmelCase : Any = model(snake_case__ )[0] _lowerCAmelCase : Optional[Any] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , snake_case__ ) _lowerCAmelCase : Dict = torch.tensor( [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = (DDPMScheduler,) def a ( self , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**snake_case__ ) return config def a ( self ): '''simple docstring''' for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=snake_case__ ) def a ( self ): '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ ) def a ( self ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=snake_case__ ) def a ( self ): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=snake_case__ ) def a ( self ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=snake_case__ ) def a ( self ): '''simple docstring''' self.check_over_configs(thresholding=snake_case__ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , ) def a ( self ): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=snake_case__ ) def a ( self ): '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.scheduler_classes[0] _lowerCAmelCase : Optional[Any] = self.get_scheduler_config() _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.scheduler_classes[0] _lowerCAmelCase : Optional[Any] = self.get_scheduler_config() _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = len(snake_case__ ) _lowerCAmelCase : str = self.dummy_model() _lowerCAmelCase : str = self.dummy_sample_deter _lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 ) for t in reversed(range(snake_case__ ) ): # 1. predict noise residual _lowerCAmelCase : List[Any] = model(snake_case__ , snake_case__ ) # 2. 
predict previous mean of sample x_t-1 _lowerCAmelCase : Any = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance _lowerCAmelCase : Dict = pred_prev_sample _lowerCAmelCase : Dict = torch.sum(torch.abs(snake_case__ ) ) _lowerCAmelCase : List[str] = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.scheduler_classes[0] _lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='v_prediction' ) _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = len(snake_case__ ) _lowerCAmelCase : Any = self.dummy_model() _lowerCAmelCase : Tuple = self.dummy_sample_deter _lowerCAmelCase : Optional[int] = torch.manual_seed(0 ) for t in reversed(range(snake_case__ ) ): # 1. predict noise residual _lowerCAmelCase : Union[str, Any] = model(snake_case__ , snake_case__ ) # 2. predict previous mean of sample x_t-1 _lowerCAmelCase : Dict = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance _lowerCAmelCase : Tuple = pred_prev_sample _lowerCAmelCase : Any = torch.sum(torch.abs(snake_case__ ) ) _lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.scheduler_classes[0] _lowerCAmelCase : Optional[int] = self.get_scheduler_config() _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=snake_case__ ) _lowerCAmelCase : Union[str, Any] = scheduler.timesteps for i, timestep in enumerate(snake_case__ ): if i == len(snake_case__ ) - 1: _lowerCAmelCase : str = -1 else: _lowerCAmelCase : Optional[Any] = timesteps[i + 1] _lowerCAmelCase : int = scheduler.previous_timestep(snake_case__ ) _lowerCAmelCase : int = prev_t.item() self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.scheduler_classes[0] _lowerCAmelCase : Tuple = self.get_scheduler_config() _lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = [100, 87, 50, 51, 0] with self.assertRaises(snake_case__ , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.scheduler_classes[0] _lowerCAmelCase : List[str] = self.get_scheduler_config() _lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = [100, 87, 50, 1, 0] _lowerCAmelCase : int = len(snake_case__ ) with self.assertRaises(snake_case__ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' 
): scheduler.set_timesteps(num_inference_steps=snake_case__ , timesteps=snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.scheduler_classes[0] _lowerCAmelCase : int = self.get_scheduler_config() _lowerCAmelCase : Any = scheduler_class(**snake_case__ ) _lowerCAmelCase : Any = [scheduler.config.num_train_timesteps] with self.assertRaises( snake_case__ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=snake_case__ )
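The tests above exercise the scheduler one step at a time; below is a hedged sketch of the full ancestral sampling loop they mirror. The model is a stand-in (a zero-noise lambda), and the tensor shape is illustrative:

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(50)  # use 50 of the 1000 training steps at inference time

model = lambda x, t: torch.zeros_like(x)  # stand-in noise predictor; swap in a real UNet
sample = torch.randn(1, 3, 32, 32)        # start from pure Gaussian noise
generator = torch.manual_seed(0)

for t in scheduler.timesteps:
    noise_pred = model(sample, t)  # 1. predict the noise residual
    # 2. compute the previous (less noisy) sample x_{t-1}
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample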
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    # Re-prompt until the user enters something convertible (or accepts the default).
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from subcommand help messages."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
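A hedged sketch of how an interactive config questionnaire might compose these helpers; the prompt strings and defaults are invented, not taken from the original:

mixed_precision = _ask_options(
    "Do you wish to use mixed precision?",
    ["no", "fp16", "bf16", "fp8"],
    _convert_mixed_precision,
)
num_machines = _ask_field(
    "How many machines will you use? [1]: ",
    convert_value=int,
    default=1,
    error_message="Please enter an integer.",
)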
630
'''simple docstring'''
import socket


def main():
    """simple docstring"""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
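For context, this client needs a sender on the other end; the following is a hypothetical companion sketch (the served file name and the free port are assumptions), not part of the original script:

# Hypothetical companion server: sends one file to the client above, then exits.
import socket

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((socket.gethostname(), 12312))
server.listen(1)
conn, _addr = server.accept()
print(conn.recv(1024))  # b'Hello server!'
with open("file_to_send", "rb") as in_file:  # assumed file name
    conn.sendall(in_file.read())
conn.close()
server.close()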
630
1
'''simple docstring'''
import unittest

import numpy as np

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.bert.modeling_flax_bert import (
        FlaxBertForMaskedLM,
        FlaxBertForMultipleChoice,
        FlaxBertForNextSentencePrediction,
        FlaxBertForPreTraining,
        FlaxBertForQuestionAnswering,
        FlaxBertForSequenceClassification,
        FlaxBertForTokenClassification,
        FlaxBertModel,
    )


class UpperCamelCase__ ( unittest.TestCase ):
    """simple docstring"""

    def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=4 , ):
        '''simple docstring'''
        _lowerCAmelCase : List[Any] = parent
        _lowerCAmelCase : Union[str, Any] = batch_size
        _lowerCAmelCase : Union[str, Any] = seq_length
        _lowerCAmelCase : Any = is_training
        _lowerCAmelCase : List[Any] = use_attention_mask
        _lowerCAmelCase : Any = use_token_type_ids
        _lowerCAmelCase : Optional[Any] = use_labels
        _lowerCAmelCase : Tuple = vocab_size
        _lowerCAmelCase : Union[str, Any] = hidden_size
        _lowerCAmelCase : int = num_hidden_layers
        _lowerCAmelCase : List[Any] = num_attention_heads
        _lowerCAmelCase : Union[str, Any] = intermediate_size
        _lowerCAmelCase : List[Any] = hidden_act
        _lowerCAmelCase : List[Any] = hidden_dropout_prob
        _lowerCAmelCase : int = attention_probs_dropout_prob
        _lowerCAmelCase : int = max_position_embeddings
        _lowerCAmelCase : Optional[int] = type_vocab_size
        _lowerCAmelCase : Dict = type_sequence_label_size
        _lowerCAmelCase : List[str] = initializer_range
        _lowerCAmelCase : Optional[Any] = num_choices

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        _lowerCAmelCase : Dict = None
        if self.use_attention_mask:
            _lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )

        _lowerCAmelCase : Any = None
        if self.use_token_type_ids:
            _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        _lowerCAmelCase : Tuple = BertConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            is_decoder=snake_case__ ,
            initializer_range=self.initializer_range ,
        )

        return config, input_ids, token_type_ids, attention_mask

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = config_and_inputs
        _lowerCAmelCase : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : int = self.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = config_and_inputs
        _lowerCAmelCase : Optional[Any] = True
        _lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        _lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """simple docstring"""

    __magic_name__ = True

    __magic_name__ = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : str = FlaxBertModelTester(self )

    @slow
    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : List[str] = FlaxBertModel.from_pretrained('bert-base-cased' )
        _lowerCAmelCase : List[Any] = model(np.ones((1, 1) ) )
        self.assertIsNotNone(snake_case__ )
630
'''simple docstring'''
import argparse
import json
import os

import torch

from transformers.file_utils import has_file

from diffusers import UNetaDConditionModel, UNetaDModel


lowerCAmelCase : Tuple = False
lowerCAmelCase : str = True
lowerCAmelCase : List[Any] = False

if __name__ == "__main__":
    lowerCAmelCase : Any = argparse.ArgumentParser()

    parser.add_argument(
        """--repo_path""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the architecture.""",
    )

    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")

    lowerCAmelCase : Optional[int] = parser.parse_args()

    lowerCAmelCase : int = {
        """image_size""": """sample_size""",
        """num_res_blocks""": """layers_per_block""",
        """block_channels""": """block_out_channels""",
        """down_blocks""": """down_block_types""",
        """up_blocks""": """up_block_types""",
        """downscale_freq_shift""": """freq_shift""",
        """resnet_num_groups""": """norm_num_groups""",
        """resnet_act_fn""": """act_fn""",
        """resnet_eps""": """norm_eps""",
        """num_head_channels""": """attention_head_dim""",
    }

    lowerCAmelCase : int = {
        """time_steps""": """time_proj""",
        """mid""": """mid_block""",
        """downsample_blocks""": """down_blocks""",
        """upsample_blocks""": """up_blocks""",
    }

    lowerCAmelCase : Optional[Any] = """""" if has_file(args.repo_path, """config.json""") else """unet"""

    with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
        lowerCAmelCase : int = reader.read()
        lowerCAmelCase : List[str] = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, """config.json"""):
        lowerCAmelCase : str = UNetaDModel(**config)
    else:
        lowerCAmelCase : Union[str, Any] = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
        lowerCAmelCase : Dict = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    lowerCAmelCase : Union[str, Any] = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                lowerCAmelCase : str = config[key]
                del config[key]

        lowerCAmelCase : Optional[int] = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
        lowerCAmelCase : Dict = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]

    if do_only_weights:
        lowerCAmelCase : Tuple = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))

        lowerCAmelCase : str = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
                continue
            lowerCAmelCase : str = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(""".""")[0] == key:
                    lowerCAmelCase : Dict = param_value
                    lowerCAmelCase : Tuple = True
            if not has_changed:
                lowerCAmelCase : Tuple = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
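The script's renaming passes are plain dict lookups; a self-contained illustration of the same pattern with made-up values:

# Illustration only: dict-driven key renaming of a config, as the script above performs.
old_config = {"image_size": 64, "num_res_blocks": 2, "act_fn": "silu"}
renames = {"image_size": "sample_size", "num_res_blocks": "layers_per_block"}
new_config = {renames.get(key, key): value for key, value in old_config.items()}
assert new_config == {"sample_size": 64, "layers_per_block": 2, "act_fn": "silu"}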
630
1
'''simple docstring'''
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """simple docstring"""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """simple docstring"""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    """simple docstring"""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    """simple docstring"""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
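A quick check of the degree rule check_circuit_or_path applies (zero odd-degree vertices means an Euler circuit, exactly two means an Euler path):

# Worked example for g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}:
# degrees are deg(1)=3, deg(2)=2, deg(3)=2, deg(4)=2, deg(5)=1,
# so there are exactly two odd-degree vertices (1 and 5) -> Euler path, no Euler circuit.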
630
'''simple docstring'''
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpta import GPTaTokenizer


class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """simple docstring"""

    def __init__( self , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None ):
        '''simple docstring'''
        super().__init__()
        _lowerCAmelCase : Union[str, Any] = pad_token_id
        _lowerCAmelCase : List[Any] = max_length
        _lowerCAmelCase : Tuple = vocab
        _lowerCAmelCase : str = merges
        _lowerCAmelCase : List[str] = BytePairTokenizer(snake_case__ , snake_case__ , sequence_length=snake_case__ )

    @classmethod
    def a ( cls , snake_case__ , *snake_case__ , **snake_case__ ):
        '''simple docstring'''
        _lowerCAmelCase : Dict = [' '.join(snake_case__ ) for m in tokenizer.bpe_ranks.keys()]
        _lowerCAmelCase : Any = tokenizer.get_vocab()
        return cls(snake_case__ , snake_case__ , *snake_case__ , **snake_case__ )

    @classmethod
    def a ( cls , snake_case__ , *snake_case__ , **snake_case__ ):
        '''simple docstring'''
        _lowerCAmelCase : List[Any] = GPTaTokenizer.from_pretrained(snake_case__ , *snake_case__ , **snake_case__ )
        return cls.from_tokenizer(snake_case__ , *snake_case__ , **snake_case__ )

    @classmethod
    def a ( cls , snake_case__ ):
        '''simple docstring'''
        return cls(**snake_case__ )

    def a ( self ):
        '''simple docstring'''
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def a ( self , snake_case__ , snake_case__ = None ):
        '''simple docstring'''
        _lowerCAmelCase : str = self.tf_tokenizer(snake_case__ )
        _lowerCAmelCase : str = tf.ones_like(snake_case__ )

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            _lowerCAmelCase : Optional[int] = max_length if max_length is not None else self.max_length

            if max_length is not None:
                _lowerCAmelCase , _lowerCAmelCase : str = pad_model_inputs(
                    snake_case__ , max_seq_length=snake_case__ , pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
630
1
'''simple docstring'''
def prefix_function(input_string):
    """simple docstring"""
    prefix_result = [0] * len(input_string)

    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result


def longest_prefix(input_string):
    """simple docstring"""
    return max(prefix_function(input_string))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
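A worked example of the values prefix_function produces:

# prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
# longest_prefix("aabcdaabc") == 4   ("aabc" is the longest proper prefix that is also a suffix)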
630
'''simple docstring'''
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowerCAmelCase : Optional[int] = {
    """configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
    """feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
    """processing_mctct""": ["""MCTCTProcessor"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : List[str] = [
        """MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MCTCTForCTC""",
        """MCTCTModel""",
        """MCTCTPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    lowerCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
630
1
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel lowerCAmelCase : str = logging.getLogger(__name__) def lowercase (_A , _A ): """simple docstring""" if os.path.exists(_A ): if os.path.exists(os.path.join(_A , 'config.json' ) ) and os.path.isfile( os.path.join(_A , 'config.json' ) ): os.remove(os.path.join(_A , 'config.json' ) ) if os.path.exists(os.path.join(_A , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(_A , 'pytorch_model.bin' ) ): os.remove(os.path.join(_A , 'pytorch_model.bin' ) ) else: os.makedirs(_A ) model.save_pretrained(_A ) def lowercase (_A , _A=False ): """simple docstring""" _lowerCAmelCase : Tuple = 2 if unlogit: _lowerCAmelCase : Union[str, Any] = torch.pow(_A , _A ) _lowerCAmelCase : List[Any] = p * torch.log(_A ) _lowerCAmelCase : List[str] = 0 return -plogp.sum(dim=-1 ) def lowercase (_A ): """simple docstring""" logger.info('lv, h >\t' + '\t'.join(f'{x + 1}' for x in range(len(_A ) ) ) ) for row in range(len(_A ) ): if tensor.dtype != torch.long: logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:.5f}' for x in tensor[row].cpu().data ) ) else: logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:d}' for x in tensor[row].cpu().data ) ) def lowercase (_A , _A , _A , _A=True , _A=True , _A=None , _A=False ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase : List[str] = model.config.num_hidden_layers, model.config.num_attention_heads _lowerCAmelCase : Union[str, Any] = torch.zeros(_A , _A ).to(args.device ) _lowerCAmelCase : List[str] = torch.zeros(_A , _A ).to(args.device ) if head_mask is None: _lowerCAmelCase : Optional[Any] = torch.ones(_A , _A ).to(args.device ) head_mask.requires_grad_(requires_grad=_A ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _lowerCAmelCase : Any = None _lowerCAmelCase : Any = 0.0 _lowerCAmelCase : List[str] = 0.0 for step, inputs in enumerate(tqdm(_A , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ): _lowerCAmelCase : Optional[int] = tuple(t.to(args.device ) for t in inputs ) ((_lowerCAmelCase) , ) : Optional[int] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _lowerCAmelCase : str = model(_A , labels=_A , head_mask=_A ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(_A ): _lowerCAmelCase : Optional[int] = entropy(attn.detach() , _A ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(_A ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _lowerCAmelCase : Tuple = 2 _lowerCAmelCase : Tuple = torch.pow(torch.pow(_A , _A ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20 if not args.dont_normalize_global_importance: _lowerCAmelCase : Dict = 
(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(_A ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(_A ) logger.info('Head ranked by importance scores' ) _lowerCAmelCase : Optional[int] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _lowerCAmelCase : Union[str, Any] = torch.arange( head_importance.numel() , device=args.device ) _lowerCAmelCase : Tuple = head_ranks.view_as(_A ) print_ad_tensor(_A ) return attn_entropy, head_importance, total_loss def lowercase (_A , _A , _A ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = compute_heads_importance(_A , _A , _A , compute_entropy=_A ) _lowerCAmelCase : int = 1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f' , _A , original_score * args.masking_threshold ) _lowerCAmelCase : List[Any] = torch.ones_like(_A ) _lowerCAmelCase : Dict = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _lowerCAmelCase : Optional[Any] = original_score while current_score >= original_score * args.masking_threshold: _lowerCAmelCase : Dict = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _lowerCAmelCase : int = float('Inf' ) _lowerCAmelCase : int = head_importance.view(-1 ).sort()[1] if len(_A ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads _lowerCAmelCase : Optional[int] = current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) ) _lowerCAmelCase : Dict = new_head_mask.view(-1 ) _lowerCAmelCase : List[Any] = 0.0 _lowerCAmelCase : Optional[Any] = new_head_mask.view_as(_A ) _lowerCAmelCase : Union[str, Any] = new_head_mask.clone().detach() print_ad_tensor(_A ) # Compute metric and head importance again _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = compute_heads_importance( _A , _A , _A , compute_entropy=_A , head_mask=_A ) _lowerCAmelCase : List[str] = 1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , _A , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , ) logger.info('Final head mask' ) print_ad_tensor(_A ) np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() ) return head_mask def lowercase (_A , _A , _A , _A ): """simple docstring""" _lowerCAmelCase : Tuple = datetime.now() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = compute_heads_importance( _A , _A , _A , compute_entropy=_A , compute_importance=_A , head_mask=_A ) _lowerCAmelCase : Dict = 1 / loss _lowerCAmelCase : Optional[int] = datetime.now() - before_time _lowerCAmelCase : Dict = sum(p.numel() for p in model.parameters() ) _lowerCAmelCase : int = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_A ) ) } for k, v in heads_to_prune.items(): if isinstance(_A , _A ): _lowerCAmelCase : Dict = [ v, ] assert sum(len(_A ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(_A ) _lowerCAmelCase : Dict = sum(p.numel() for p in model.parameters() ) _lowerCAmelCase : Union[str, Any] = datetime.now() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = compute_heads_importance( _A , _A , _A , compute_entropy=_A , 
compute_importance=_A , head_mask=_A , actually_pruned=_A , ) _lowerCAmelCase : Dict = 1 / loss _lowerCAmelCase : Tuple = datetime.now() - before_time logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , _A , _A , pruned_num_params / original_num_params * 1_0_0 , ) logger.info('Pruning: score with masking: %f score with pruning: %f' , _A , _A ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 ) save_model(_A , args.output_dir ) def lowercase (): """simple docstring""" _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir' , default=_A , type=_A , required=_A , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , ) parser.add_argument( '--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--output_dir' , default=_A , type=_A , required=_A , help='The output directory where the model predictions and checkpoints will be written.' , ) # Other parameters parser.add_argument( '--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name_or_path' , ) parser.add_argument( '--tokenizer_name' , default='' , type=_A , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , ) parser.add_argument( '--cache_dir' , default=_A , type=_A , help='Where do you want to store the pre-trained models downloaded from s3' , ) parser.add_argument( '--data_subset' , type=_A , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' ) parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , ) parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' ) parser.add_argument( '--masking_threshold' , default=0.9 , type=_A , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , ) parser.add_argument( '--masking_amount' , default=0.1 , type=_A , help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name' , default='acc' , type=_A , help='Metric to use for head masking.' ) parser.add_argument( '--max_seq_length' , default=1_2_8 , type=_A , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' ) , ) parser.add_argument('--batch_size' , default=1 , type=_A , help='Batch size.' ) parser.add_argument('--seed' , type=_A , default=4_2 ) parser.add_argument('--local_rank' , type=_A , default=-1 , help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip' , type=_A , default='' , help='Can be used for distant debugging.' 
) parser.add_argument('--server_port' , type=_A , default='' , help='Can be used for distant debugging.' ) _lowerCAmelCase : Union[str, Any] = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_A ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _lowerCAmelCase : Dict = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) _lowerCAmelCase : Union[str, Any] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _lowerCAmelCase : List[str] = torch.device('cuda' , args.local_rank ) _lowerCAmelCase : Any = 1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) _lowerCAmelCase : str = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: _lowerCAmelCase : Any = nn.parallel.DistributedDataParallel( _A , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_A ) elif args.n_gpu > 1: _lowerCAmelCase : int = nn.DataParallel(_A ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=_A ) torch.save(_A , os.path.join(args.output_dir , 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s' , _A ) # Prepare dataset _lowerCAmelCase : List[str] = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) _lowerCAmelCase : Dict = (torch.from_numpy(_A ),) _lowerCAmelCase : str = TensorDataset(*_A ) _lowerCAmelCase : Dict = RandomSampler(_A ) _lowerCAmelCase : Any = DataLoader(_A , sampler=_A , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(_A , _A , _A ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _lowerCAmelCase : Union[str, Any] = mask_heads(_A , _A , _A ) prune_heads(_A , _A , _A , _A ) if __name__ == "__main__": main()
630
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]


def next_number(number):
    """simple docstring"""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
        number //= 1_0_0_0_0_0

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.

# The other one ends with 1 and has only one element 1.

# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_00_00_00
CHAINS[0] = True
CHAINS[57] = False


def chain(number):
    """simple docstring"""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 1_0_0_0_0_0_0_0:
        CHAINS[number - 1] = number_chain
        number *= 1_0

    return number_chain


def solution(number=1_0_0_0_0_0_0_0):
    """simple docstring"""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(F'''{solution() = }''')
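Two chains worked by hand, matching what next_number computes one digit block at a time:

# 44 -> 4**2 + 4**2 = 32 -> 3**2 + 2**2 = 13 -> 1 + 9 = 10 -> 1 + 0 = 1    (arrives at 1)
# 85 -> 8**2 + 5**2 = 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89   (arrives at 89)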
630
1
'''simple docstring'''
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    """simple docstring"""
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [2_5_5, 2_5_5, 2_5_5] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
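The nested Python loops are slow on large images; a vectorized NumPy sketch of the same operation (assuming the usual uint8 array returned by imread):

import numpy as np


def convert_to_negative_vectorized(img: np.ndarray) -> np.ndarray:
    # elementwise 255 - value over the whole array; stays in range for uint8 inputs
    return 255 - img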
630
'''simple docstring''' import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(snake_case__ , 'width_multiplier' ) ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=64 , snake_case__=2 , snake_case__=3 , snake_case__="swish" , snake_case__=3 , snake_case__=32 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=True , snake_case__=True , snake_case__=10 , snake_case__=None , snake_case__=0.25 , snake_case__=0.0 , snake_case__=0.0 , ): '''simple docstring''' _lowerCAmelCase : Tuple = parent _lowerCAmelCase : Optional[int] = batch_size _lowerCAmelCase : List[Any] = image_size _lowerCAmelCase : List[Any] = patch_size _lowerCAmelCase : List[str] = num_channels _lowerCAmelCase : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 ) _lowerCAmelCase : Optional[Any] = hidden_act _lowerCAmelCase : List[Any] = conv_kernel_size _lowerCAmelCase : Optional[Any] = output_stride _lowerCAmelCase : List[Any] = classifier_dropout_prob _lowerCAmelCase : str = use_labels _lowerCAmelCase : List[str] = is_training _lowerCAmelCase : Optional[int] = num_labels _lowerCAmelCase : List[str] = initializer_range _lowerCAmelCase : str = scope _lowerCAmelCase : Any = width_multiplier _lowerCAmelCase : Union[str, Any] = ffn_dropout _lowerCAmelCase : Optional[int] = attn_dropout def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase : Optional[Any] = None _lowerCAmelCase : Dict = None if self.use_labels: _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _lowerCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def a ( self ): '''simple docstring''' return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = MobileViTVaModel(config=snake_case__ ) model.to(snake_case__ ) 
model.eval() _lowerCAmelCase : str = model(snake_case__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.num_labels _lowerCAmelCase : List[Any] = MobileViTVaForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.num_labels _lowerCAmelCase : Optional[int] = MobileViTVaForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Dict = model(snake_case__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) _lowerCAmelCase : Any = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def a ( self ): '''simple docstring''' _lowerCAmelCase : int = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = config_and_inputs _lowerCAmelCase : Tuple = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) __magic_name__ = ( { "feature-extraction": MobileViTVaModel, "image-classification": MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def a ( self ): '''simple docstring''' _lowerCAmelCase : int = MobileViTVaModelTester(self ) _lowerCAmelCase : Dict = MobileViTVaConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ ) def a ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' ) def a ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' ) def a ( self ): '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not output attentions' ) def a ( self ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' ) def a ( self ): '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def a ( self ): '''simple docstring''' pass def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : str = model_class(snake_case__ ) _lowerCAmelCase : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase : int = [*signature.parameters.keys()] _lowerCAmelCase : Any = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def a ( self ): '''simple docstring''' def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ): _lowerCAmelCase : Dict = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): _lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) _lowerCAmelCase : List[str] = outputs.hidden_states _lowerCAmelCase : List[str] = 5 self.assertEqual(len(snake_case__ ) , snake_case__ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. _lowerCAmelCase : List[Any] = 2 for i in range(len(snake_case__ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : Optional[int] = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase : Any = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) @slow def a ( self ): '''simple docstring''' for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : Dict = MobileViTVaModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def lowercase (): """simple docstring""" _lowerCAmelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def a ( self ): '''simple docstring''' return ( MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ) if is_vision_available() else None ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to( snake_case__ ) _lowerCAmelCase : str = self.default_image_processor _lowerCAmelCase : Any = prepare_img() _lowerCAmelCase : Optional[int] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # forward pass with 
torch.no_grad(): _lowerCAmelCase : Tuple = model(**snake_case__ ) # verify the logits _lowerCAmelCase : Optional[Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) _lowerCAmelCase : Tuple = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(snake_case__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _lowerCAmelCase : Any = model.to(snake_case__ ) _lowerCAmelCase : int = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _lowerCAmelCase : Optional[int] = prepare_img() _lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # forward pass with torch.no_grad(): _lowerCAmelCase : int = model(**snake_case__ ) _lowerCAmelCase : Dict = outputs.logits # verify the logits _lowerCAmelCase : str = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , snake_case__ ) _lowerCAmelCase : Any = torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1E-4 ) ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Any = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _lowerCAmelCase : List[Any] = model.to(snake_case__ ) _lowerCAmelCase : str = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _lowerCAmelCase : Tuple = prepare_img() _lowerCAmelCase : List[str] = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # forward pass with torch.no_grad(): _lowerCAmelCase : Any = model(**snake_case__ ) _lowerCAmelCase : Optional[Any] = outputs.logits.detach().cpu() _lowerCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(50, 60)] ) _lowerCAmelCase : List[Any] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , snake_case__ ) _lowerCAmelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=snake_case__ ) _lowerCAmelCase : Tuple = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , snake_case__ )
630
1
'''simple docstring'''
import baseaa


def lowercase (_A ):
    """simple docstring"""
    return baseaa.baaencode(string.encode('utf-8' ) )


def lowercase (_A ):
    """simple docstring"""
    return baseaa.baadecode(_A ).decode('utf-8' )


if __name__ == "__main__":
    lowerCAmelCase : Optional[int] = """Hello World!"""
    lowerCAmelCase : Union[str, Any] = baseaa_encode(test)
    print(encoded)

    lowerCAmelCase : Any = baseaa_decode(encoded)
    print(decoded)
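The digit-mangled `baseaa` above stands for Python's standard base64 module; the original pair could have been any of its b16/b32/b64 helpers, which all mangle identically. A runnable round-trip, arbitrarily using the Base32 pair:

import base64


def base32_encode(string: str) -> bytes:
    # encode the input to bytes, then Base32-encode it
    return base64.b32encode(string.encode("utf-8"))


def base32_decode(encoded: bytes) -> str:
    # Base32-decode, then decode the bytes back to str
    return base64.b32decode(encoded).decode("utf-8")


if __name__ == "__main__":
    encoded = base32_encode("Hello World!")
    print(encoded)  # b'JBSWY3DPEBLW64TMMQQQ===='
    print(base32_decode(encoded))  # Hello World!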
630
'''simple docstring'''
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : List[str] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
        _lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('xlm-roberta-base' )
        _lowerCAmelCase : Dict = 'The dog is cute and lives in the garden house'
        _lowerCAmelCase : List[str] = jnp.array([tokenizer.encode(snake_case__ )] )
        _lowerCAmelCase : Optional[int] = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        _lowerCAmelCase : Tuple = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )

        _lowerCAmelCase : Union[str, Any] = model(snake_case__ )['last_hidden_state']
        self.assertEqual(output.shape , snake_case__ )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , snake_case__ , atol=1E-3 ) )
630
1
'''simple docstring'''
from typing import Any


def mode(input_list):
    """simple docstring"""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
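Expected behaviour on small inputs:

# mode([2, 2, 3]) -> [2]
# mode([1, 2, 2, 1]) -> [1, 2]   (all tied values are returned, sorted)
# mode([]) -> []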
630
'''simple docstring'''
def pancake_sort(arr):
    """simple docstring"""
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
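A short trace of the flip sequence:

# pancake_sort([3, 1, 2]):
#   cur=3: max 3 sits at index 0 -> flip prefix of length 1 (no-op) -> flip first 3: [2, 1, 3]
#   cur=2: max of [2, 1] sits at index 0 -> flip prefix of length 1 -> flip first 2: [1, 2, 3]
# returns [1, 2, 3]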
630
1
'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__=None , ): '''simple docstring''' _lowerCAmelCase : Dict = parent _lowerCAmelCase : Tuple = batch_size _lowerCAmelCase : str = seq_length _lowerCAmelCase : Tuple = is_training _lowerCAmelCase : Optional[int] = use_input_mask _lowerCAmelCase : Any = use_token_type_ids _lowerCAmelCase : List[str] = use_labels _lowerCAmelCase : Optional[Any] = vocab_size _lowerCAmelCase : Optional[Any] = hidden_size _lowerCAmelCase : Optional[Any] = num_hidden_layers _lowerCAmelCase : Tuple = num_attention_heads _lowerCAmelCase : str = intermediate_size _lowerCAmelCase : Tuple = hidden_act _lowerCAmelCase : List[str] = hidden_dropout_prob _lowerCAmelCase : str = attention_probs_dropout_prob _lowerCAmelCase : Any = max_position_embeddings _lowerCAmelCase : Tuple = type_vocab_size _lowerCAmelCase : List[str] = type_sequence_label_size _lowerCAmelCase : Union[str, Any] = initializer_range _lowerCAmelCase : Union[str, Any] = num_labels _lowerCAmelCase : Optional[int] = num_choices _lowerCAmelCase : int = scope def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase : Optional[int] = None if self.use_input_mask: _lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase : Tuple = None if self.use_token_type_ids: _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowerCAmelCase : Tuple = None _lowerCAmelCase : Union[str, Any] = None _lowerCAmelCase : List[Any] = None if self.use_labels: _lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a ( self ): '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , 
is_decoder=snake_case__ , initializer_range=self.initializer_range , ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = LlamaModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Any = model(snake_case__ , attention_mask=snake_case__ ) _lowerCAmelCase : int = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Tuple = True _lowerCAmelCase : Union[str, Any] = LlamaModel(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : List[Any] = model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , ) _lowerCAmelCase : Dict = model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , ) _lowerCAmelCase : Any = model(snake_case__ , attention_mask=snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = LlamaForCausalLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : str = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' _lowerCAmelCase : int = True _lowerCAmelCase : Union[str, Any] = True _lowerCAmelCase : Any = LlamaForCausalLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() # first forward pass _lowerCAmelCase : Dict = model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , ) _lowerCAmelCase : Any = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _lowerCAmelCase : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowerCAmelCase : int = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _lowerCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCAmelCase : List[str] = torch.cat([input_mask, next_mask] , dim=-1 ) _lowerCAmelCase : Dict = model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0] _lowerCAmelCase : List[Any] = model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0] # select random slice _lowerCAmelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() _lowerCAmelCase : Optional[Any] = output_from_past[:, :, random_slice_idx].detach() 
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : str = config_and_inputs _lowerCAmelCase : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () __magic_name__ = (LlamaForCausalLM,) if is_torch_available() else () __magic_name__ = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = LlamaModelTester(self ) _lowerCAmelCase : str = ConfigTester(self , config_class=snake_case__ , hidden_size=37 ) def a ( self ): '''simple docstring''' self.config_tester.run_common_tests() def a ( self ): '''simple docstring''' _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCAmelCase : int = type self.model_tester.create_and_check_model(*snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : Union[str, Any] = 3 _lowerCAmelCase : Optional[Any] = input_dict['input_ids'] _lowerCAmelCase : str = input_ids.ne(1 ).to(snake_case__ ) _lowerCAmelCase : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _lowerCAmelCase : Union[str, Any] = LlamaForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : str = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : Tuple = 3 _lowerCAmelCase : Union[str, Any] = 'single_label_classification' _lowerCAmelCase : List[str] = input_dict['input_ids'] _lowerCAmelCase : Tuple = input_ids.ne(1 ).to(snake_case__ ) _lowerCAmelCase : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _lowerCAmelCase : List[Any] = LlamaForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def a ( self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = 
self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : Tuple = 3 _lowerCAmelCase : Optional[int] = 'multi_label_classification' _lowerCAmelCase : List[Any] = input_dict['input_ids'] _lowerCAmelCase : Optional[Any] = input_ids.ne(1 ).to(snake_case__ ) _lowerCAmelCase : Tuple = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) _lowerCAmelCase : Any = LlamaForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() _lowerCAmelCase : Optional[int] = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def a ( self ): '''simple docstring''' pass @parameterized.expand([('linear',), ('dynamic',)] ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase : Union[str, Any] = ids_tensor([1, 10] , config.vocab_size ) _lowerCAmelCase : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _lowerCAmelCase : List[Any] = LlamaModel(snake_case__ ) original_model.to(snake_case__ ) original_model.eval() _lowerCAmelCase : List[str] = original_model(snake_case__ ).last_hidden_state _lowerCAmelCase : List[Any] = original_model(snake_case__ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _lowerCAmelCase : Tuple = {'type': scaling_type, 'factor': 10.0} _lowerCAmelCase : Optional[Any] = LlamaModel(snake_case__ ) scaled_model.to(snake_case__ ) scaled_model.eval() _lowerCAmelCase : Any = scaled_model(snake_case__ ).last_hidden_state _lowerCAmelCase : Dict = scaled_model(snake_case__ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' 
) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = [1, 306, 4658, 278, 6593, 310, 2834, 338] _lowerCAmelCase : int = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' ) _lowerCAmelCase : Union[str, Any] = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 _lowerCAmelCase : Any = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , snake_case__ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off _lowerCAmelCase : Any = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , snake_case__ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338] _lowerCAmelCase : Dict = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' ) _lowerCAmelCase : str = model(torch.tensor(snake_case__ ) ) # Expected mean on dim = -1 _lowerCAmelCase : Tuple = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , snake_case__ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off _lowerCAmelCase : Dict = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , snake_case__ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338] _lowerCAmelCase : Dict = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' ) _lowerCAmelCase : Union[str, Any] = model(torch.tensor(snake_case__ ) ) # Expected mean on dim = -1 _lowerCAmelCase : Any = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , snake_case__ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off _lowerCAmelCase : Union[str, Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , snake_case__ , atol=1E-2 , rtol=1E-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test' ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338] _lowerCAmelCase : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' ) _lowerCAmelCase : List[Any] = model(torch.tensor(snake_case__ ) ) _lowerCAmelCase : List[Any] = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , snake_case__ , atol=1E-2 , rtol=1E-2 ) # fmt: off _lowerCAmelCase : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , snake_case__ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('Model is curently gated' ) @slow def a ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' _lowerCAmelCase : Dict = 'Simply put, the theory of relativity states that ' _lowerCAmelCase : List[str] = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) _lowerCAmelCase : List[Any] = tokenizer.encode(snake_case__ , return_tensors='pt' ) _lowerCAmelCase : str = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=snake_case__ ) # greedy generation outputs _lowerCAmelCase : List[Any] = model.generate(snake_case__ , max_new_tokens=64 , top_p=snake_case__ , temperature=1 , do_sample=snake_case__ ) _lowerCAmelCase : Tuple = tokenizer.decode(generated_ids[0] , skip_special_tokens=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ )
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : str = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "gptj" __magic_name__ = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , snake_case__=5_0400 , snake_case__=2048 , snake_case__=4096 , snake_case__=28 , snake_case__=16 , snake_case__=64 , snake_case__=None , snake_case__="gelu_new" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1E-5 , snake_case__=0.02 , snake_case__=True , snake_case__=5_0256 , snake_case__=5_0256 , snake_case__=False , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : int = vocab_size _lowerCAmelCase : Optional[int] = n_positions _lowerCAmelCase : Optional[int] = n_embd _lowerCAmelCase : Optional[int] = n_layer _lowerCAmelCase : str = n_head _lowerCAmelCase : Tuple = n_inner _lowerCAmelCase : Tuple = rotary_dim _lowerCAmelCase : Optional[int] = activation_function _lowerCAmelCase : Any = resid_pdrop _lowerCAmelCase : List[str] = embd_pdrop _lowerCAmelCase : int = attn_pdrop _lowerCAmelCase : Any = layer_norm_epsilon _lowerCAmelCase : Optional[int] = initializer_range _lowerCAmelCase : List[str] = use_cache _lowerCAmelCase : Dict = bos_token_id _lowerCAmelCase : Any = eos_token_id super().__init__( bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = "default" , snake_case__ = None , snake_case__ = False , ): '''simple docstring''' super().__init__(snake_case__ , task=snake_case__ , patching_specs=snake_case__ , use_past=snake_case__ ) if not getattr(self._config , 'pad_token_id' , snake_case__ ): # TODO: how to do that better? 
_lowerCAmelCase : Any = 0 @property def a ( self ): '''simple docstring''' _lowerCAmelCase : str = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(snake_case__ , direction='inputs' ) _lowerCAmelCase : int = {0: 'batch', 1: 'past_sequence + sequence'} else: _lowerCAmelCase : int = {0: 'batch', 1: 'sequence'} return common_inputs @property def a ( self ): '''simple docstring''' return self._config.n_layer @property def a ( self ): '''simple docstring''' return self._config.n_head def a ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = super(snake_case__ , self ).generate_dummy_inputs( snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ ) # We need to order the input in the way they appears in the forward() _lowerCAmelCase : Any = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = common_inputs['input_ids'].shape # Not using the same length for past_key_values _lowerCAmelCase : Any = seqlen + 2 _lowerCAmelCase : Optional[int] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _lowerCAmelCase : Tuple = [ (torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers ) ] _lowerCAmelCase : Tuple = common_inputs['attention_mask'] if self.use_past: _lowerCAmelCase : Any = ordered_inputs['attention_mask'].dtype _lowerCAmelCase : Union[str, Any] = torch.cat( [ordered_inputs['attention_mask'], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 ) return ordered_inputs @property def a ( self ): '''simple docstring''' return 13
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowerCAmelCase : Dict = 16 lowerCAmelCase : Optional[int] = 32 def lowercase (_A , _A = 1_6 , _A = "bert-base-cased" ): """simple docstring""" _lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(_A ) _lowerCAmelCase : Tuple = load_dataset('glue' , 'mrpc' ) def tokenize_function(_A ): # max_length=None => use the model max length (it's actually the default) _lowerCAmelCase : int = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_A , max_length=_A ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset _lowerCAmelCase : List[Any] = datasets.map( _A , batched=_A , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_A ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowerCAmelCase : List[str] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_A ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_A , padding='max_length' , max_length=1_2_8 , return_tensors='pt' ) return tokenizer.pad(_A , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. _lowerCAmelCase : Tuple = DataLoader( tokenized_datasets['train'] , shuffle=_A , collate_fn=_A , batch_size=_A ) _lowerCAmelCase : str = DataLoader( tokenized_datasets['validation'] , shuffle=_A , collate_fn=_A , batch_size=_A ) return train_dataloader, eval_dataloader def lowercase (_A , _A , _A , _A ): """simple docstring""" model.eval() _lowerCAmelCase : List[str] = 0 for step, batch in enumerate(_A ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _lowerCAmelCase : Any = model(**_A ) _lowerCAmelCase : List[str] = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times _lowerCAmelCase , _lowerCAmelCase : List[Any] = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(_A ) - 1: _lowerCAmelCase : List[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen] _lowerCAmelCase : int = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=_A , references=_A , ) _lowerCAmelCase : List[str] = metric.compute() return eval_metric["accuracy"] def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : Optional[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCAmelCase : str = config['lr'] _lowerCAmelCase : int = int(config['num_epochs'] ) _lowerCAmelCase : Tuple = int(config['seed'] ) _lowerCAmelCase : int = int(config['batch_size'] ) _lowerCAmelCase : int = args.model_name_or_path set_seed(_A ) _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = get_dataloaders(_A , _A , _A ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCAmelCase : int = AutoModelForSequenceClassification.from_pretrained(_A , return_dict=_A ) # Instantiate optimizer _lowerCAmelCase : List[str] = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) _lowerCAmelCase : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_A ) if accelerator.state.deepspeed_plugin is not None: _lowerCAmelCase : str = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: _lowerCAmelCase : str = 1 _lowerCAmelCase : List[Any] = (len(_A ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): _lowerCAmelCase : str = get_linear_schedule_with_warmup( optimizer=_A , num_warmup_steps=0 , num_training_steps=_A , ) else: _lowerCAmelCase : Any = DummyScheduler(_A , total_num_steps=_A , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = accelerator.prepare( _A , _A , _A , _A , _A ) # We need to keep track of how many total steps we have iterated over _lowerCAmelCase : List[Any] = 0 # We also need to keep track of the stating epoch so files are named properly _lowerCAmelCase : Optional[Any] = 0 _lowerCAmelCase : Optional[Any] = evaluate.load('glue' , 'mrpc' ) _lowerCAmelCase : List[Any] = num_epochs if args.partial_train_epoch is not None: _lowerCAmelCase : Any = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) _lowerCAmelCase : str = args.resume_from_checkpoint.split('epoch_' )[1] _lowerCAmelCase : str = '' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break _lowerCAmelCase : Any = int(_A ) + 1 _lowerCAmelCase : str = evaluation_loop(_A , _A , _A , _A ) accelerator.print('resumed checkpoint performance:' , _A ) accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] ) accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'] ) with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , 'r' ) as f: _lowerCAmelCase : Tuple = json.load(_A ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model _lowerCAmelCase : Tuple = {} for epoch in range(_A , _A ): model.train() for step, batch in enumerate(_A ): _lowerCAmelCase : Any = model(**_A ) _lowerCAmelCase : str = outputs.loss _lowerCAmelCase : Any = loss / gradient_accumulation_steps accelerator.backward(_A ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 _lowerCAmelCase : Dict = f'epoch_{epoch}' _lowerCAmelCase : List[Any] = os.path.join(args.output_dir , _A ) accelerator.save_state(_A ) _lowerCAmelCase : List[str] = evaluation_loop(_A , _A , _A , _A ) _lowerCAmelCase : str = accuracy _lowerCAmelCase : Union[str, Any] = lr_scheduler.get_lr()[0] _lowerCAmelCase : Optional[int] = optimizer.param_groups[0]['lr'] _lowerCAmelCase : Dict = epoch _lowerCAmelCase : Optional[int] = overall_step accelerator.print(f'epoch {epoch}:' , _A ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , 'w' ) as f: json.dump(_A , _A ) def lowercase (): """simple docstring""" _lowerCAmelCase : List[str] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=_A , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_A , ) parser.add_argument( '--output_dir' , type=_A , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--resume_from_checkpoint' , type=_A , default=_A , help='If the training should continue from a checkpoint folder.' 
, ) parser.add_argument( '--partial_train_epoch' , type=_A , default=_A , help='If passed, the training will stop after this number of epochs.' , ) parser.add_argument( '--num_epochs' , type=_A , default=2 , help='Number of train epochs.' , ) _lowerCAmelCase : int = parser.parse_args() _lowerCAmelCase : List[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 4_2, 'batch_size': 1_6} training_function(_A , _A ) if __name__ == "__main__": main()
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowerCAmelCase : Any = {
    """configuration_x_clip""": [
        """XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """XCLIPConfig""",
        """XCLIPTextConfig""",
        """XCLIPVisionConfig""",
    ],
    """processing_x_clip""": ["""XCLIPProcessor"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : Optional[Any] = [
        """XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """XCLIPModel""",
        """XCLIPPreTrainedModel""",
        """XCLIPTextModel""",
        """XCLIPVisionModel""",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    lowerCAmelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


lowerCAmelCase : Optional[Any] = {
    """configuration_poolformer""": [
        """POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """PoolFormerConfig""",
        """PoolFormerOnnxConfig""",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : List[Any] = ["""PoolFormerFeatureExtractor"""]
    lowerCAmelCase : List[Any] = ["""PoolFormerImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : List[str] = [
        """POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """PoolFormerForImageClassification""",
        """PoolFormerModel""",
        """PoolFormerPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    lowerCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
'''simple docstring'''

import math
from datetime import datetime, timedelta


def lowercase (_A ):
    """simple docstring"""
    _lowerCAmelCase : Optional[Any] = year % 1_9
    _lowerCAmelCase : Any = year % 4
    _lowerCAmelCase : Optional[int] = year % 7
    _lowerCAmelCase : int = math.floor(year / 1_0_0 )
    _lowerCAmelCase : Dict = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
    _lowerCAmelCase : Optional[Any] = leap_day_inhibits / 4
    _lowerCAmelCase : Dict = (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    _lowerCAmelCase : List[Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    _lowerCAmelCase : Dict = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
    # PHM -> Paschal Full Moon
    _lowerCAmelCase : Union[str, Any] = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7
    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(_A , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(_A , 4 , 1_8 )
    else:
        return datetime(_A , 3 , 2_2 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )


if __name__ == "__main__":
    for year in (19_94, 20_00, 20_10, 20_21, 20_23):
        lowerCAmelCase : List[str] = """will be""" if year > datetime.now().year else """was"""
        print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
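# The file above implements Gauss's Easter algorithm, but the obfuscated
# assignment targets hide the recurrences. Below is a minimal self-contained
# sketch with descriptive names; every name is an assumption inferred from the
# reads in the code above (`metonic_cycle`, `days_to_add`, ...), not part of
# the original row.
import math as _math
from datetime import datetime as _datetime, timedelta as _timedelta


def gauss_easter_sketch(year: int) -> "_datetime":
    """Gregorian date of Easter Sunday for `year`, per Gauss's computation."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = _math.floor(year / 100)
    lunar_orbit_correction = _math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30  # days past March 21
    days_from_phm_to_sunday = (2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return _datetime(year, 4, 19)
    if days_to_add == 28 and days_from_phm_to_sunday == 6:
        return _datetime(year, 4, 18)
    return _datetime(year, 3, 22) + _timedelta(days=int(days_to_add + days_from_phm_to_sunday))


# Sanity check against a known date: Easter Sunday 2000 fell on April 23.
assert gauss_easter_sketch(2000) == _datetime(2000, 4, 23)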
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig lowerCAmelCase : str = logging.get_logger(__name__) # General docstring lowerCAmelCase : Optional[Any] = """RegNetConfig""" # Base docstring lowerCAmelCase : int = """facebook/regnet-y-040""" lowerCAmelCase : Optional[Any] = [1, 10_88, 7, 7] # Image classification docstring lowerCAmelCase : Any = """facebook/regnet-y-040""" lowerCAmelCase : Optional[Any] = """tabby, tabby cat""" lowerCAmelCase : Tuple = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = 3 , snake_case__ = 1 , snake_case__ = 1 , snake_case__ = "relu" , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb _lowerCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) _lowerCAmelCase : List[Any] = tf.keras.layers.ConvaD( filters=snake_case__ , kernel_size=snake_case__ , strides=snake_case__ , padding='VALID' , groups=snake_case__ , use_bias=snake_case__ , name='convolution' , ) _lowerCAmelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) _lowerCAmelCase : int = ACTaFN[activation] if activation is not None else tf.identity def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = self.convolution(self.padding(snake_case__ ) ) _lowerCAmelCase : Union[str, Any] = self.normalization(snake_case__ ) _lowerCAmelCase : int = self.activation(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : str = config.num_channels _lowerCAmelCase : List[Any] = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = shape_list(snake_case__ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) _lowerCAmelCase : List[Any] = tf.transpose(snake_case__ , perm=(0, 2, 3, 1) ) _lowerCAmelCase : Tuple = self.embedder(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = 2 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = tf.keras.layers.ConvaD( filters=snake_case__ , kernel_size=1 , strides=snake_case__ , use_bias=snake_case__ , name='convolution' ) _lowerCAmelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) def a ( self , snake_case__ , snake_case__ = False ): '''simple docstring''' return self.normalization(self.convolution(snake_case__ ) , training=snake_case__ ) class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='pooler' ) _lowerCAmelCase : str = [ tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='relu' , name='attention.0' ), tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='sigmoid' , name='attention.2' ), ] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = self.pooler(snake_case__ ) for layer_module in self.attention: _lowerCAmelCase : Tuple = layer_module(snake_case__ ) _lowerCAmelCase : Optional[Any] = hidden_state * pooled return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Optional[int] = in_channels != out_channels or stride != 1 _lowerCAmelCase : Union[str, Any] = max(1 , out_channels // config.groups_width ) _lowerCAmelCase : Optional[Any] = ( TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
_lowerCAmelCase : Any = [ TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='layer.1' ), TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='layer.2' ), ] _lowerCAmelCase : List[str] = ACTaFN[config.hidden_act] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = hidden_state for layer_module in self.layers: _lowerCAmelCase : int = layer_module(snake_case__ ) _lowerCAmelCase : int = self.shortcut(snake_case__ ) hidden_state += residual _lowerCAmelCase : Tuple = self.activation(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : List[str] = in_channels != out_channels or stride != 1 _lowerCAmelCase : Union[str, Any] = max(1 , out_channels // config.groups_width ) _lowerCAmelCase : Optional[Any] = ( TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) _lowerCAmelCase : Tuple = [ TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='layer.1' ), TFRegNetSELayer(snake_case__ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ), TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='layer.3' ), ] _lowerCAmelCase : Tuple = ACTaFN[config.hidden_act] def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Dict = hidden_state for layer_module in self.layers: _lowerCAmelCase : List[Any] = layer_module(snake_case__ ) _lowerCAmelCase : Tuple = self.shortcut(snake_case__ ) hidden_state += residual _lowerCAmelCase : str = self.activation(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 2 , snake_case__ = 2 , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Dict = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer _lowerCAmelCase : Optional[int] = [ # downsampling is done in the first layer with stride of 2 layer(snake_case__ , snake_case__ , snake_case__ , stride=snake_case__ , name='layers.0' ), *[layer(snake_case__ , snake_case__ , snake_case__ , name=F'layers.{i+1}' ) for i in range(depth - 1 )], ] def a ( self , snake_case__ ): '''simple docstring''' for layer_module in self.layers: _lowerCAmelCase : int = layer_module(snake_case__ ) return hidden_state class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : str = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( snake_case__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) ) _lowerCAmelCase : Union[str, Any] = zip(config.hidden_sizes , 
config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(snake_case__ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(snake_case__ , snake_case__ , snake_case__ , depth=snake_case__ , name=F'stages.{i+1}' ) ) def a ( self , snake_case__ , snake_case__ = False , snake_case__ = True ): '''simple docstring''' _lowerCAmelCase : List[Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _lowerCAmelCase : str = hidden_states + (hidden_state,) _lowerCAmelCase : List[str] = stage_module(snake_case__ ) if output_hidden_states: _lowerCAmelCase : Dict = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ ) @keras_serializable class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" __magic_name__ = RegNetConfig def __init__( self , snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(**snake_case__ ) _lowerCAmelCase : Union[str, Any] = config _lowerCAmelCase : Union[str, Any] = TFRegNetEmbeddings(snake_case__ , name='embedder' ) _lowerCAmelCase : Optional[int] = TFRegNetEncoder(snake_case__ , name='encoder' ) _lowerCAmelCase : Dict = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='pooler' ) @unpack_inputs def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = False , ): '''simple docstring''' _lowerCAmelCase : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase : int = self.embedder(snake_case__ , training=snake_case__ ) _lowerCAmelCase : List[str] = self.encoder( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ ) _lowerCAmelCase : List[Any] = encoder_outputs[0] _lowerCAmelCase : Tuple = self.pooler(snake_case__ ) # Change to NCHW output format have uniformity in the modules _lowerCAmelCase : Optional[int] = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) _lowerCAmelCase : Optional[Any] = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: _lowerCAmelCase : Union[str, Any] = tuple([tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case__ , pooler_output=snake_case__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = RegNetConfig __magic_name__ = "regnet" __magic_name__ = "pixel_values" @property def a ( self ): '''simple docstring''' return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} lowerCAmelCase : List[Any] = r""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ lowerCAmelCase : Dict = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE_ , ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , *snake_case__ , **snake_case__ ) _lowerCAmelCase : List[str] = TFRegNetMainLayer(snake_case__ , name='regnet' ) @unpack_inputs @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__=False , ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase : str = self.regnet( pixel_values=snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , SCREAMING_SNAKE_CASE_ , ) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , *snake_case__ , **snake_case__ ) _lowerCAmelCase : Optional[Any] = config.num_labels _lowerCAmelCase : Optional[Any] = TFRegNetMainLayer(snake_case__ , name='regnet' ) # classification head _lowerCAmelCase : Optional[int] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a ( self , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__=False , ): '''simple docstring''' _lowerCAmelCase : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase : Dict = self.regnet( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ ) _lowerCAmelCase : Optional[Any] = outputs.pooler_output if return_dict else outputs[1] _lowerCAmelCase : List[Any] = self.classifier[0](snake_case__ ) _lowerCAmelCase : Tuple = self.classifier[1](snake_case__ ) _lowerCAmelCase : int = None if labels is None else self.hf_compute_loss(labels=snake_case__ , logits=snake_case__ ) if not return_dict: _lowerCAmelCase : str = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
'''simple docstring'''

import unittest

from knapsack import greedy_knapsack as kp


class UpperCamelCase__ ( unittest.TestCase ):
    """simple docstring"""

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : Union[str, Any] = [10, 20, 30, 40, 50, 60]
        _lowerCAmelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
        _lowerCAmelCase : Dict = 100
        self.assertEqual(kp.calc_profit(snake_case__ , snake_case__ , snake_case__ ) , 210 )

    def a ( self ):
        '''simple docstring'''
        self.assertRaisesRegex(snake_case__ , 'max_weight must greater than zero.' )

    def a ( self ):
        '''simple docstring'''
        self.assertRaisesRegex(snake_case__ , 'Weight can not be negative.' )

    def a ( self ):
        '''simple docstring'''
        self.assertRaisesRegex(snake_case__ , 'Profit can not be negative.' )

    def a ( self ):
        '''simple docstring'''
        self.assertRaisesRegex(snake_case__ , 'max_weight must greater than zero.' )

    def a ( self ):
        '''simple docstring'''
        self.assertRaisesRegex(
            snake_case__ , 'The length of profit and weight must be same.' )


if __name__ == "__main__":
    unittest.main()
'''simple docstring'''


class UpperCamelCase__ :
    """simple docstring"""

    def __init__( self , snake_case__ ):
        '''simple docstring'''
        _lowerCAmelCase : Tuple = arr.split(',' )

    def a ( self ):
        '''simple docstring'''
        _lowerCAmelCase : List[Any] = [int(self.array[0] )] * len(self.array )
        _lowerCAmelCase : Optional[Any] = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            _lowerCAmelCase : Tuple = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            _lowerCAmelCase : int = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]


if __name__ == "__main__":
    lowerCAmelCase : str = input("""please input some numbers:""")
    lowerCAmelCase : List[str] = SubArray(whole_array)
    lowerCAmelCase : Optional[int] = array.solve_sub_array()
    print(("""the results is:""", re))
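# The class above is Kadane's maximum-subarray algorithm in disguise: one array
# tracks the best sum of a subarray ending at index i, the other carries the
# running maximum. A minimal sketch of the same recurrence follows; the names
# are illustrative, not the dataset's own.
def max_subarray_sum_sketch(values: list) -> int:
    best_ending_here = best_so_far = values[0]
    for value in values[1:]:
        # Either extend the current subarray or start fresh at `value`.
        best_ending_here = max(best_ending_here + value, value)
        best_so_far = max(best_so_far, best_ending_here)
    return best_so_far


assert max_subarray_sum_sketch([1, 2, -3, 4]) == 4
assert max_subarray_sum_sketch([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # [4, -1, 2, 1]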
'''simple docstring'''


def lowercase (_A , _A ):
    """simple docstring"""
    _lowerCAmelCase : Optional[int] = (boundary[1] - boundary[0]) / steps
    _lowerCAmelCase : Any = boundary[0]
    _lowerCAmelCase : List[str] = boundary[1]
    _lowerCAmelCase : Tuple = make_points(_A , _A , _A )
    _lowerCAmelCase : Tuple = 0.0
    y += (h / 2.0) * f(_A )
    for i in x_i:
        # print(i)
        y += h * f(_A )
    y += (h / 2.0) * f(_A )
    return y


def lowercase (_A , _A , _A ):
    """simple docstring"""
    _lowerCAmelCase : Tuple = a + h
    while x < (b - h):
        yield x
        _lowerCAmelCase : Any = x + h


def lowercase (_A ):  # enter your function here
    """simple docstring"""
    _lowerCAmelCase : int = (x - 0) * (x - 0)
    return y


def lowercase ():
    """simple docstring"""
    _lowerCAmelCase : Optional[Any] = 0.0  # Lower bound of integration
    _lowerCAmelCase : Dict = 1.0  # Upper bound of integration
    _lowerCAmelCase : Optional[Any] = 10.0  # define number of steps or resolution
    _lowerCAmelCase : Optional[int] = [a, b]  # define boundary of integration
    _lowerCAmelCase : List[Any] = method_a(_A , _A )
    print(f'y = {y}' )


if __name__ == "__main__":
    main()
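# The integrator above is the composite trapezoidal rule:
#   integral_a^b f(x) dx  ~=  h * (f(a)/2 + f(a+h) + ... + f(b-h) + f(b)/2),
# with h = (b - a) / steps. A minimal self-contained sketch (illustrative
# names, not the dataset's own):
def trapezoid_sketch(f, a: float, b: float, steps: int) -> float:
    h = (b - a) / steps
    total = 0.5 * (f(a) + f(b))  # endpoints weighted by 1/2
    for i in range(1, steps):
        total += f(a + i * h)    # interior points weighted by 1
    return h * total


# For f(x) = x**2 on [0, 1] with 10 steps the rule gives 0.335, close to the
# exact value 1/3; the trapezoid rule overestimates convex integrands.
assert abs(trapezoid_sketch(lambda x: x * x, 0.0, 1.0, 10) - 0.335) < 1e-9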
'''simple docstring'''


def lowercase (_A , _A ):
    """simple docstring"""
    _lowerCAmelCase : str = word.split()

    def justify(_A , _A , _A ) -> str:
        _lowerCAmelCase : Optional[int] = max_width - width
        _lowerCAmelCase : List[Any] = len(_A )
        if len(_A ) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            _lowerCAmelCase : Optional[int] = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            _lowerCAmelCase : str = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            _lowerCAmelCase : List[Any] = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(_A ):
                num_spaces_between_words_list[i] += 1
            _lowerCAmelCase : str = []
            for i in range(_A ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(_A )

    _lowerCAmelCase : Optional[Any] = []
    _lowerCAmelCase : list[str] = []
    _lowerCAmelCase : Any = 0
    for word in words:
        if width + len(_A ) + len(_A ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(_A )
            width += len(_A )
        else:
            # justify the line and add it to result
            answer.append(justify(_A , _A , _A ) )
            # reset new line and new width
            _lowerCAmelCase , _lowerCAmelCase : int = [word], len(_A )
    _lowerCAmelCase : int = max_width - width - len(_A )
    answer.append(' '.join(_A ) + (remaining_spaces + 1) * ' ' )
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
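# The `justify` helper above distributes leftover spaces round-robin from the
# left, so the leftmost gaps absorb the extra space when the count does not
# divide evenly. A minimal sketch of just that distribution step (names are
# illustrative, not the dataset's own):
def distribute_spaces_sketch(extra_spaces: int, gaps: int) -> list:
    spaces = [extra_spaces // gaps] * gaps   # even share per gap
    for i in range(extra_spaces % gaps):     # round-robin the remainder leftward
        spaces[i] += 1
    return spaces


# 7 extra spaces over 3 gaps: the left gap gets the remainder -> [3, 2, 2]
assert distribute_spaces_sketch(7, 3) == [3, 2, 2]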
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


lowerCAmelCase : int = {
    """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
    """processing_trocr""": ["""TrOCRProcessor"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : Optional[Any] = [
        """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TrOCRForCausalLM""",
        """TrOCRPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    lowerCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring''' import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : """simple docstring""" @staticmethod def a ( *snake_case__ , **snake_case__ ): '''simple docstring''' pass def lowercase (_A ): """simple docstring""" _lowerCAmelCase : List[str] = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" __magic_name__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def a ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = DepthEstimationPipeline(model=snake_case__ , image_processor=snake_case__ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def a ( self , snake_case__ , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' ) self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , snake_case__ ) import datasets _lowerCAmelCase : List[str] = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' ) _lowerCAmelCase : List[str] = depth_estimator( [ Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'http://images.cocodataset.org/val2017/000000039769.jpg', # RGBA dataset[0]['file'], # LA dataset[1]['file'], # L dataset[2]['file'], ] ) self.assertEqual( [ {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, ] , snake_case__ , ) @require_tf @unittest.skip('Depth estimation is not implemented in TF' ) def a ( self ): '''simple docstring''' pass @slow @require_torch def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = 'Intel/dpt-large' _lowerCAmelCase : Any = pipeline('depth-estimation' , model=snake_case__ ) _lowerCAmelCase : Optional[int] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' ) _lowerCAmelCase : List[str] = hashimage(outputs['depth'] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 ) @require_torch def a ( self ): '''simple docstring''' self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
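# Minimal usage sketch of what the slow test above exercises: the
# "depth-estimation" pipeline with the Intel/dpt-large checkpoint returns a
# dict holding the raw "predicted_depth" tensor and a rendered "depth" PIL
# image. Both the task name and the model id are taken from the test itself;
# running this downloads a large model and needs network access.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(outputs["predicted_depth"].shape)  # torch.Tensor of per-pixel depth
outputs["depth"].save("depth.png")       # PIL.Image visualization of the map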
'''simple docstring'''

from collections import Counter
from timeit import timeit


def lowercase (_A = "" , ):
    """simple docstring"""
    return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2


def lowercase (_A = "" ):
    """simple docstring"""
    if len(_A ) == 0:
        return True
    _lowerCAmelCase : Union[str, Any] = input_str.replace(' ' , '' ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    _lowerCAmelCase : dict[str, int] = {}
    for character in lower_case_input_str:
        _lowerCAmelCase : Union[str, Any] = character_freq_dict.get(_A , 0 ) + 1
    _lowerCAmelCase : List[Any] = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def lowercase (_A = "" ):
    """simple docstring"""
    print('\nFor string = ' , _A , ':' )
    print(
        '> can_string_be_rearranged_as_palindrome_counter()' ,
        '\tans =' ,
        can_string_be_rearranged_as_palindrome_counter(_A ) ,
        '\ttime =' ,
        timeit(
            'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' ,
            setup='import __main__ as z' ,
        ) ,
        'seconds' ,
    )
    print(
        '> can_string_be_rearranged_as_palindrome()' ,
        '\tans =' ,
        can_string_be_rearranged_as_palindrome(_A ) ,
        '\ttime =' ,
        timeit(
            'z.can_string_be_rearranged_as_palindrome(z.check_str)' ,
            setup='import __main__ as z' ,
        ) ,
        'seconds' ,
    )


if __name__ == "__main__":
    lowerCAmelCase : Tuple = input(
        """Enter string to determine if it can be rearranged as a palindrome or not: """ ).strip()
    benchmark(check_str)
    lowerCAmelCase : Optional[Any] = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
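# Both checks above rest on one fact: a string can be rearranged into a
# palindrome iff at most one distinct character occurs an odd number of times.
# A compact sketch of the Counter-based variant (illustrative name):
from collections import Counter as _Counter


def can_be_palindrome_sketch(text: str) -> bool:
    counts = _Counter(text.replace(" ", "").lower())
    return sum(count % 2 for count in counts.values()) < 2


assert can_be_palindrome_sketch("Momo")       # m:2, o:2 -> e.g. "ommo"
assert not can_be_palindrome_sketch("abcd")   # four odd counts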
'''simple docstring'''


def lowercase (_A ):
    """simple docstring"""
    _lowerCAmelCase : int = int(_A )
    if n_element < 1:
        _lowerCAmelCase : Any = ValueError('a should be a positive number' )
        raise my_error
    _lowerCAmelCase : Union[str, Any] = [1]
    _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = (0, 0, 0)
    _lowerCAmelCase : Tuple = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list


if __name__ == "__main__":
    lowerCAmelCase : str = input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    lowerCAmelCase : Optional[int] = hamming(int(n))
    print("""-----------------------------------------------------""")
    print(F'''The list with nth numbers is: {hamming_numbers}''')
    print("""-----------------------------------------------------""")
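# Hamming (5-smooth) numbers are exactly the values 2^i * 3^j * 5^k. The loop
# above advances three lagging pointers into the list; a minimal sketch of the
# same three-pointer merge (illustrative names, not the dataset's own):
def hamming_sketch(n: int) -> list:
    if n < 1:
        raise ValueError("n should be a positive number")
    result = [1]
    i = j = k = 0  # indices of the next candidates to multiply by 2, 3 and 5
    while len(result) < n:
        candidate = min(result[i] * 2, result[j] * 3, result[k] * 5)
        result.append(candidate)
        # Advance every pointer that produced `candidate` to avoid duplicates.
        if candidate == result[i] * 2:
            i += 1
        if candidate == result[j] * 3:
            j += 1
        if candidate == result[k] * 5:
            k += 1
    return result


assert hamming_sketch(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]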
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase : str = logging.get_logger(__name__) lowerCAmelCase : int = { """facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""", } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = "data2vec-text" def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ ) _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Tuple = hidden_size _lowerCAmelCase : Dict = num_hidden_layers _lowerCAmelCase : int = num_attention_heads _lowerCAmelCase : str = hidden_act _lowerCAmelCase : Any = intermediate_size _lowerCAmelCase : Any = hidden_dropout_prob _lowerCAmelCase : Optional[int] = attention_probs_dropout_prob _lowerCAmelCase : str = max_position_embeddings _lowerCAmelCase : Any = type_vocab_size _lowerCAmelCase : int = initializer_range _lowerCAmelCase : List[str] = layer_norm_eps _lowerCAmelCase : List[Any] = position_embedding_type _lowerCAmelCase : str = use_cache _lowerCAmelCase : Union[str, Any] = classifier_dropout class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" @property def a ( self ): '''simple docstring''' if self.task == "multiple-choice": _lowerCAmelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _lowerCAmelCase : List[Any] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
'''simple docstring''' import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @property def a ( self ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a ( self ): '''simple docstring''' _lowerCAmelCase : List[str] = ort.SessionOptions() _lowerCAmelCase : str = False return options def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/in_paint/overture-creations-5sI6fQgYIuo.png' ) _lowerCAmelCase : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' ) _lowerCAmelCase : List[str] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy' ) # using the PNDM scheduler by default _lowerCAmelCase : Optional[Any] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Union[str, Any] = 'A red cat sitting on a park bench' _lowerCAmelCase : Tuple = np.random.RandomState(0 ) _lowerCAmelCase : Tuple = pipe( prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=snake_case__ , output_type='np' , ) _lowerCAmelCase : Optional[int] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-2
'''simple docstring'''

import pytest

import datasets


# Import fixture modules as plugins
lowerCAmelCase : List[str] = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]


def lowercase (_A , _A ):
    """simple docstring"""
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit'] ):
            continue
        item.add_marker(pytest.mark.unit )


def lowercase (_A ):
    """simple docstring"""
    config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )


@pytest.fixture(autouse=_A )
def lowercase (_A , _A ):
    """simple docstring"""
    _lowerCAmelCase : str = tmp_path_factory.getbasetemp() / 'cache'
    _lowerCAmelCase : Dict = test_hf_cache_home / 'datasets'
    _lowerCAmelCase : List[Any] = test_hf_cache_home / 'metrics'
    _lowerCAmelCase : List[Any] = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_A ) )
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_A ) )
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_A ) )
    _lowerCAmelCase : Dict = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_A ) )
    _lowerCAmelCase : Union[str, Any] = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_A ) )


@pytest.fixture(autouse=_A , scope='session' )
def lowercase ():
    """simple docstring"""
    datasets.disable_progress_bar()


@pytest.fixture(autouse=_A )
def lowercase (_A ):
    """simple docstring"""
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _A )


@pytest.fixture
def lowercase (_A ):
    """simple docstring"""
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _A )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
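# A short, hedged illustration (comments only, so the lazy machinery above is
# not defeated by an eager import at module bottom) of what the `_LazyModule`
# indirection buys: importing the package is cheap, and heavy backends are only
# loaded when one of their symbols is first touched. The import path assumes
# this file lives at `transformers/models/opt/__init__.py`:
#
#     from transformers.models.opt import OPTConfig  # loads configuration_opt only
#     config = OPTConfig()                           # torch/tf/flax still not imported
#
#     from transformers.models.opt import OPTModel   # first access triggers the
#     model = OPTModel(config)                       # real import of modeling_opt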
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """
    RegNet Embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    """
    Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    """
    RegNet's layer composed by three 3x3 convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    """
    A RegNet stage composed of stacked layers.
    """

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
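# A minimal inference sketch for the classification model defined above. It is
# illustrative only: it assumes the public "facebook/regnet-y-040" checkpoint,
# and `cat.png` is a hypothetical local RGB image path. The `AutoImageProcessor`
# and `from_pretrained` calls are the standard transformers API rather than
# anything specific to this module.
if __name__ == "__main__":
    import tensorflow as tf
    from PIL import Image

    from transformers import AutoImageProcessor, TFRegNetForImageClassification

    image = Image.open("cat.png")
    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

    inputs = processor(images=image, return_tensors="tf")
    logits = model(**inputs).logits
    predicted_label = int(tf.math.argmax(logits, axis=-1)[0])
    print(model.config.id2label[predicted_label])  # e.g. "tabby, tabby cat"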