code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __lowercase = get_tests_dir('''fixtures/spiece.model''') @require_sentencepiece @require_tokenizers class a__( A__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Any = DebertaVaTokenizer UpperCAmelCase_ : int = DebertaVaTokenizerFast UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : List[Any] = True def a_ ( self): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase = DebertaVaTokenizer(__A , unk_token="""<unk>""") tokenizer.save_pretrained(self.tmpdirname) def a_ ( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = """this is a test""" lowerCAmelCase = """this is a test""" return input_text, output_text def a_ ( self): """simple docstring""" lowerCAmelCase = """<pad>""" lowerCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A) , __A) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A) , __A) def a_ ( self): """simple docstring""" lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<pad>""") self.assertEqual(vocab_keys[1] , """<unk>""") self.assertEqual(vocab_keys[-1] , """[PAD]""") self.assertEqual(len(__A) , 30001) def a_ ( self): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 30000) def a_ ( self): """simple docstring""" lowerCAmelCase = """ \tHeLLo!how \n Are yoU? 
""" lowerCAmelCase = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""] # fmt: on lowerCAmelCase = DebertaVaTokenizer(__A , do_lower_case=__A) lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A)) self.assertListEqual(__A , __A) lowerCAmelCase = DebertaVaTokenizerFast(__A , do_lower_case=__A) lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A)) self.assertListEqual(__A , __A) @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""") def a_ ( self): """simple docstring""" pass @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""") def a_ ( self): """simple docstring""" pass def a_ ( self): """simple docstring""" lowerCAmelCase = """I was born in 92000, and this is falsé.""" lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on lowerCAmelCase = DebertaVaTokenizer(__A , split_by_punct=__A) lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A)) self.assertListEqual(__A , __A) lowerCAmelCase = DebertaVaTokenizerFast(__A , split_by_punct=__A) lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A)) self.assertListEqual(__A , __A) def a_ ( self): """simple docstring""" lowerCAmelCase = """I was born in 92000, and this is falsé.""" lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on lowerCAmelCase = DebertaVaTokenizer(__A , do_lower_case=__A , split_by_punct=__A) lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A)) 
self.assertListEqual(__A , __A) lowerCAmelCase = DebertaVaTokenizerFast(__A , do_lower_case=__A , split_by_punct=__A) lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A)) self.assertListEqual(__A , __A) def a_ ( self): """simple docstring""" lowerCAmelCase = """I was born in 92000, and this is falsé.""" lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ] # fmt: on lowerCAmelCase = DebertaVaTokenizer(__A , do_lower_case=__A , split_by_punct=__A) lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A)) self.assertListEqual(__A , __A) lowerCAmelCase = DebertaVaTokenizerFast(__A , do_lower_case=__A , split_by_punct=__A) lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A)) self.assertListEqual(__A , __A) def a_ ( self): """simple docstring""" lowerCAmelCase = """I was born in 92000, and this is falsé.""" lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on lowerCAmelCase = DebertaVaTokenizer(__A , do_lower_case=__A , split_by_punct=__A) lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A)) self.assertListEqual(__A , __A) lowerCAmelCase = DebertaVaTokenizerFast(__A , do_lower_case=__A , split_by_punct=__A) lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A)) self.assertListEqual(__A , __A) def a_ ( self): """simple docstring""" lowerCAmelCase = """ \tHeLLo!how \n Are yoU? 
""" lowerCAmelCase = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""] # fmt: on lowerCAmelCase = DebertaVaTokenizer(__A , do_lower_case=__A , split_by_punct=__A) lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A)) self.assertListEqual(__A , __A) lowerCAmelCase = DebertaVaTokenizerFast(__A , do_lower_case=__A , split_by_punct=__A) lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A)) self.assertListEqual(__A , __A) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = """I was born in 92000, and this is falsé.""" lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A)) lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A)) self.assertListEqual(__A , __A) lowerCAmelCase = tokenizer.encode(__A , add_special_tokens=__A) lowerCAmelCase = rust_tokenizer.encode(__A , add_special_tokens=__A) self.assertListEqual(__A , __A) lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = tokenizer.encode(__A) lowerCAmelCase = rust_tokenizer.encode(__A) self.assertListEqual(__A , __A) def a_ ( self): """simple docstring""" lowerCAmelCase = """This is a test""" lowerCAmelCase = [13, 1, 4398, 25, 21, 1289] lowerCAmelCase = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""] lowerCAmelCase = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""] lowerCAmelCase = DebertaVaTokenizer(__A , keep_accents=__A) lowerCAmelCase = DebertaVaTokenizerFast(__A , keep_accents=__A) lowerCAmelCase = tokenizer.encode(__A , add_special_tokens=__A) self.assertListEqual(__A , __A) lowerCAmelCase = tokenizer.tokenize(__A) self.assertListEqual(__A , __A) lowerCAmelCase = tokenizer.convert_ids_to_tokens(__A) 
self.assertListEqual(__A , __A) lowerCAmelCase = rust_tokenizer.encode(__A , add_special_tokens=__A) self.assertListEqual(__A , __A) lowerCAmelCase = rust_tokenizer.tokenize(__A) self.assertListEqual(__A , __A) lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(__A) self.assertListEqual(__A , __A) # fmt: off lowerCAmelCase = """I was born in 92000, and this is falsé.""" lowerCAmelCase = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] lowerCAmelCase = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ] lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ] # fmt: on lowerCAmelCase = tokenizer.encode(__A , add_special_tokens=__A) self.assertListEqual(__A , __A) lowerCAmelCase = tokenizer.tokenize(__A) self.assertListEqual(__A , __A) lowerCAmelCase = tokenizer.convert_ids_to_tokens(__A) self.assertListEqual(__A , __A) lowerCAmelCase = rust_tokenizer.encode(__A , add_special_tokens=__A) self.assertListEqual(__A , __A) lowerCAmelCase = rust_tokenizer.tokenize(__A) self.assertListEqual(__A , __A) lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(__A) self.assertListEqual(__A , __A) def a_ ( self): """simple docstring""" lowerCAmelCase = DebertaVaTokenizer(__A) lowerCAmelCase = tokenizer.encode("""sequence builders""") lowerCAmelCase = tokenizer.encode("""multi-sequence build""") lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__A) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__A , __A) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __A) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __A , ) @slow def a_ ( self): """simple docstring""" lowerCAmelCase = {"""input_ids""": [[1, 39867, 36, 
19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__A , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
272
"""simple docstring""" def _snake_case ( lowercase__ : List[Any] , lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : Any ) -> int: '''simple docstring''' lowerCAmelCase_ :int = [False] * len(lowercase__ ) lowerCAmelCase_ :str = [] queue.append(lowercase__ ) lowerCAmelCase_ :Any = True while queue: lowerCAmelCase_ :Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase__ ) lowerCAmelCase_ :Union[str, Any] = True lowerCAmelCase_ :int = u return visited[t] def _snake_case ( lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : str ) -> Dict: '''simple docstring''' lowerCAmelCase_ :List[Any] = [-1] * (len(lowercase__ )) lowerCAmelCase_ :str = 0 while bfs(lowercase__ , lowercase__ , lowercase__ , lowercase__ ): lowerCAmelCase_ :List[str] = float("""Inf""" ) lowerCAmelCase_ :List[str] = sink while s != source: # Find the minimum value in select path lowerCAmelCase_ :Any = min(lowercase__ , graph[parent[s]][s] ) lowerCAmelCase_ :Union[str, Any] = parent[s] max_flow += path_flow lowerCAmelCase_ :Tuple = sink while v != source: lowerCAmelCase_ :List[str] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowerCAmelCase_ :Union[str, Any] = parent[v] return max_flow __UpperCAmelCase = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __UpperCAmelCase , __UpperCAmelCase = 0, 5 print(ford_fulkerson(graph, source, sink))
84
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class A__ ( unittest.TestCase ): """simple docstring""" def __init__( self , __snake_case , __snake_case=7 , __snake_case=3 , __snake_case=1_0 , __snake_case=1_8 , __snake_case=3_0 , __snake_case=4_0_0 , __snake_case=True , __snake_case=None , __snake_case=True , __snake_case=[0.5, 0.5, 0.5] , __snake_case=[0.5, 0.5, 0.5] , __snake_case=None , ): snake_case = size if size is not None else {"""shortest_edge""": 1_8} snake_case = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8} snake_case = parent snake_case = batch_size snake_case = num_channels snake_case = num_frames snake_case = image_size snake_case = min_resolution snake_case = max_resolution snake_case = do_resize snake_case = size snake_case = do_normalize snake_case = image_mean snake_case = image_std snake_case = crop_size def a_ ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class A__ ( A__ , unittest.TestCase ): """simple docstring""" __magic_name__ = VivitImageProcessor if is_vision_available() else None def a_ ( self ): snake_case = VivitImageProcessingTester(self ) @property def a_ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def a_ ( self ): snake_case = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , '''image_mean''' ) ) self.assertTrue(hasattr(__A , '''image_std''' ) ) self.assertTrue(hasattr(__A , '''do_normalize''' ) ) 
self.assertTrue(hasattr(__A , '''do_resize''' ) ) self.assertTrue(hasattr(__A , '''do_center_crop''' ) ) self.assertTrue(hasattr(__A , '''size''' ) ) def a_ ( self ): snake_case = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8} ) self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} ) snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} ) self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} ) def a_ ( self ): # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos snake_case = prepare_video_inputs(self.image_processor_tester , equal_resolution=__A ) for video in video_inputs: self.assertIsInstance(__A , __A ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input snake_case = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case = image_processing(__A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def a_ ( self ): # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case = prepare_video_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for video in video_inputs: 
self.assertIsInstance(__A , __A ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input snake_case = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case = image_processing(__A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def a_ ( self ): # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case = prepare_video_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for video in video_inputs: self.assertIsInstance(__A , __A ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input snake_case = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case = image_processing(__A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
127
"""simple docstring""" import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = 1_0 lowerCAmelCase_ :Optional[int] = datasets.Features( { """tokens""": datasets.Sequence(datasets.Value("""string""" ) ), """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ), """answers""": datasets.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), """id""": datasets.Value("""int64""" ), } ) lowerCAmelCase_ :int = datasets.Dataset.from_dict( { """tokens""": [["""foo"""] * 5] * n, """labels""": [[1] * 5] * n, """answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0, """id""": list(range(lowercase__ ) ), } , features=lowercase__ , ) return dataset @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : int ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return filename # FILE_CONTENT + files __UpperCAmelCase = '\\n Text data.\n Second line of data.' 
@pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> str: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt""" lowerCAmelCase_ :List[Any] = FILE_CONTENT with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> Tuple: '''simple docstring''' import bza lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2""" lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] ) -> Dict: '''simple docstring''' import gzip lowerCAmelCase_ :int = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" ) lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with gzip.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' if datasets.config.LZ4_AVAILABLE: import lza.frame lowerCAmelCase_ :List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4""" lowerCAmelCase_ :int = bytes(lowercase__ , """utf-8""" ) with lza.frame.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : Optional[int] ) -> Any: '''simple docstring''' if datasets.config.PY7ZR_AVAILABLE: import pyazr lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z""" with pyazr.SevenZipFile(lowercase__ , """w""" ) as archive: archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' 
import tarfile lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> str: '''simple docstring''' import lzma lowerCAmelCase_ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz""" lowerCAmelCase_ :Optional[Any] = bytes(lowercase__ , """utf-8""" ) with lzma.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import zipfile lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Tuple: '''simple docstring''' if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst""" lowerCAmelCase_ :Any = bytes(lowercase__ , """utf-8""" ) with zstd.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """file.xml""" lowerCAmelCase_ :Any = textwrap.dedent( """\ <?xml version=\"1.0\" encoding=\"UTF-8\" ?> <tmx version=\"1.4\"> <header segtype=\"sentence\" srclang=\"ca\" /> <body> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv> <tuv 
xml:lang=\"en\"><seg>Content 3</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv> </tu> </body> </tmx>""" ) with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename __UpperCAmelCase = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __UpperCAmelCase = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __UpperCAmelCase = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __UpperCAmelCase = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __UpperCAmelCase = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Union[str, Any]: '''simple docstring''' return DATA_DICT_OF_LISTS @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Any: '''simple docstring''' lowerCAmelCase_ :Tuple = datasets.Dataset.from_dict(lowercase__ ) lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" ) with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con: lowerCAmelCase_ :Union[str, Any] = con.cursor() cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" ) for item in DATA: cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, 
?)""" , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> int: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Optional[int] = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Any: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Dict = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Dict ) -> Union[str, Any]: '''simple docstring''' import bza lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2""" with open(lowercase__ , """rb""" ) as f: lowerCAmelCase_ :Union[str, Any] = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> Optional[Any]: '''simple 
docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) ) f.write(lowercase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : str ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" ) lowerCAmelCase_ :Optional[Any] = pa.schema( { """col_1""": pa.string(), """col_2""": pa.intaa(), """col_3""": pa.floataa(), } ) with open(lowercase__ , """wb""" ) as f: lowerCAmelCase_ :Optional[int] = pq.ParquetWriter(lowercase__ , schema=lowercase__ ) lowerCAmelCase_ :List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ ) writer.write_table(lowercase__ ) writer.close() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Union[str, Any] = {"""data""": DATA} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> List[Any]: '''simple docstring''' 
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Optional[Any] = {"""data""": DATA_DICT_OF_LISTS} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_312: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_STR: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int , lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path 
@pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : List[Any] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[str] ) -> int: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : List[str] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Any = 
tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : str , lowercase__ : List[str] , lowercase__ : int ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Dict = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc""" with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip""" with 
zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : List[str] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename("""unsupported.ext""" ) ) f.write(lowercase__ , arcname=os.path.basename("""unsupported_2.ext""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] ) lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" ) with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> int: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" ) @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Tuple: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" ) @pytest.fixture(scope="""session""" ) def 
_snake_case ( lowercase__ : Any , lowercase__ : Tuple ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace(""".jpg""" , """2.jpg""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data_dir""" ) (data_dir / "subdir").mkdir() with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden file with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) return data_dir
84
0
"""Tests for the mask-generation pipeline (SAM-style automatic mask generation)."""

import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:
    # Stub so the module imports without Pillow; any call is a no-op.
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image) -> str:
    """Return a short, stable fingerprint of an image's raw bytes."""
    m = hashlib.md5(image.tobytes())  # was hashlib.mda: no such digest exists
    return m.hexdigest()[:10]


def mask_to_test_readable(mask) -> Dict:
    """Shrink a mask to a comparable summary: content hash + array shape."""
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        # The generic pipeline test harness is not applicable here.
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing (fixed: the accumulator was assigned under an
        # obfuscated name but appended to as `new_outupt` -> NameError)
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
57
"""Data2VecText model configuration (RoBERTa-style text encoder)."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    """Configuration for a Data2VecText model.

    The parameter names below were reconstructed from the body's
    ``self.<name> = <arg>`` assignments; the original signature declared
    every argument as ``__A`` (duplicate names -> SyntaxError).
    """

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # multiple-choice inputs carry an extra "choice" axis
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
84
0
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Encode a lowercase string as 1-based alphabet positions ('a' -> 1 ... 'z' -> 26)."""
    # was ord(lowercase__): the loop variable is `elem`
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of encode(): map 1-based alphabet positions back to characters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    """Read a line from stdin, show its encoding and the round-trip decoding."""
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
199
"""Convert a native T5X (JAX) checkpoint into a PyTorch T5 checkpoint.

NOTE(review): the obfuscated source defined every function as `_snake_case`
while the call sites used real names, imported the nonexistent `tax` package /
`TaConfig` classes, and discarded all target state-dict keys.  Names and keys
were reconstructed from the upstream transformers conversion script -- verify
against a real checkpoint before trusting the output.
"""

import argparse
import collections

import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return the (k, o, q, v) kernels of attention block `i` under `prefix`."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the MLP kernels of block `i`; `wi` is a pair when the MLP is gated."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer-norm scale of block `i` under `prefix`."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Map a flattened T5X parameter tree to HF T5 state-dict keys (numpy arrays)."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Wrap converted numpy params in torch tensors and fill in tied weights."""
    state_dict = collections.OrderedDict(
        [(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()]
    )

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:
            # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Load a T5X checkpoint from disk into an HF T5 model in place."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False
):
    """Build the HF model described by `config_file`, load T5X weights, save it."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    args = parser.parse_args()
    # fixed: args attribute must match the declared flag (--t5x_checkpoint_path)
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
84
0
# Convert TF-Keras EfficientNet checkpoints (b0-b7) to HuggingFace format.
#
# NOTE(review): this chunk was damaged by an identifier-obfuscation pass — every
# `def` is named `lowerCAmelCase__`, module constants share one name, and most
# assignment targets became `A__` while the *reads* keep the original names
# (CONFIG_MAP, model_classes, logger, config, rename_keys, key_mapping, ...).
# The true targets are unrecoverable from this view, so the code is kept
# byte-identical (reformatted only) and comments describe the intended behavior.
import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image

from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()

# Module logger (target name obfuscated; read later as `logger` — TODO confirm).
lowerCAmelCase__ = logging.get_logger(__name__)

# Map from size suffix to the Keras constructor (read later as `model_classes`).
# NOTE(review): all values collapsed to `EfficientNetBa` by the obfuscation;
# presumably these were EfficientNetB0..B7 — verify against upstream.
lowerCAmelCase__ = {
    """b0""": efficientnet.EfficientNetBa,
    """b1""": efficientnet.EfficientNetBa,
    """b2""": efficientnet.EfficientNetBa,
    """b3""": efficientnet.EfficientNetBa,
    """b4""": efficientnet.EfficientNetBa,
    """b5""": efficientnet.EfficientNetBa,
    """b6""": efficientnet.EfficientNetBa,
    """b7""": efficientnet.EfficientNetBa,
}

# Per-size hyper-parameters (read later as `CONFIG_MAP`).
lowerCAmelCase__ = {
    """b0""": {
        """hidden_dim""": 1_2_8_0,
        """width_coef""": 1.0,
        """depth_coef""": 1.0,
        """image_size""": 2_2_4,
        """dropout_rate""": 0.2,
        """dw_padding""": [],
    },
    """b1""": {
        """hidden_dim""": 1_2_8_0,
        """width_coef""": 1.0,
        """depth_coef""": 1.1,
        """image_size""": 2_4_0,
        """dropout_rate""": 0.2,
        """dw_padding""": [1_6],
    },
    """b2""": {
        """hidden_dim""": 1_4_0_8,
        """width_coef""": 1.1,
        """depth_coef""": 1.2,
        """image_size""": 2_6_0,
        """dropout_rate""": 0.3,
        """dw_padding""": [5, 8, 1_6],
    },
    """b3""": {
        """hidden_dim""": 1_5_3_6,
        """width_coef""": 1.2,
        """depth_coef""": 1.4,
        """image_size""": 3_0_0,
        """dropout_rate""": 0.3,
        """dw_padding""": [5, 1_8],
    },
    """b4""": {
        """hidden_dim""": 1_7_9_2,
        """width_coef""": 1.4,
        """depth_coef""": 1.8,
        """image_size""": 3_8_0,
        """dropout_rate""": 0.4,
        """dw_padding""": [6],
    },
    """b5""": {
        """hidden_dim""": 2_0_4_8,
        """width_coef""": 1.6,
        """depth_coef""": 2.2,
        """image_size""": 4_5_6,
        """dropout_rate""": 0.4,
        """dw_padding""": [1_3, 2_7],
    },
    """b6""": {
        """hidden_dim""": 2_3_0_4,
        """width_coef""": 1.8,
        """depth_coef""": 2.6,
        """image_size""": 5_2_8,
        """dropout_rate""": 0.5,
        """dw_padding""": [3_1],
    },
    """b7""": {
        """hidden_dim""": 2_5_6_0,
        """width_coef""": 2.0,
        """depth_coef""": 3.1,
        """image_size""": 6_0_0,
        """dropout_rate""": 0.5,
        """dw_padding""": [1_8],
    },
}


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple ) -> str:
    """Build an EfficientNetConfig for a given size key plus ImageNet-1k labels.

    NOTE(review): reads `model_name`, `idalabel`, returns `config` — the
    obfuscation destroyed the matching assignment targets.
    """
    A__ = EfficientNetConfig()
    A__ = CONFIG_MAP[model_name]["""hidden_dim"""]
    A__ = CONFIG_MAP[model_name]["""width_coef"""]
    A__ = CONFIG_MAP[model_name]["""depth_coef"""]
    A__ = CONFIG_MAP[model_name]["""image_size"""]
    A__ = CONFIG_MAP[model_name]["""dropout_rate"""]
    A__ = CONFIG_MAP[model_name]["""dw_padding"""]
    # Pull the ImageNet-1k id->label mapping from the hub.
    A__ = """huggingface/label-files"""
    A__ = """imagenet-1k-id2label.json"""
    A__ = 1_0_0_0
    A__ = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) )
    A__ = {int(lowercase__ ): v for k, v in idalabel.items()}
    A__ = idalabel
    A__ = {v: k for k, v in idalabel.items()}
    return config


def lowerCAmelCase__ ( ) -> int:
    """Download the standard COCO test image used to sanity-check the conversion."""
    A__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    A__ = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
    return im


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int ) -> Tuple:
    """Build an EfficientNetImageProcessor matching the model's input resolution."""
    A__ = CONFIG_MAP[model_name]["""image_size"""]
    A__ = EfficientNetImageProcessor(
        size={"height": size, "width": size} ,
        image_mean=[0.485, 0.456, 0.406] ,
        image_std=[0.47853944, 0.4732864, 0.47434163] ,
        do_center_crop=lowercase__ ,
    )
    return preprocessor


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> List[str]:
    """Build the TF-name -> HF-name parameter mapping (returned as `key_mapping`)."""
    # Collect the distinct TF block ids (e.g. "1a", "2b", ...) and map each to a
    # consecutive HF block index.
    A__ = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
    A__ = sorted(set(lowercase__ ) )
    A__ = len(lowercase__ )
    A__ = {b: str(lowercase__ ) for b, i in zip(lowercase__ , range(lowercase__ ) )}
    A__ = []
    # Stem.
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
    # Per-block expansion / depthwise / squeeze-excite / projection parameters.
    for b in block_names:
        A__ = block_name_mapping[b]
        rename_keys.append((F'block{b}_expand_conv/kernel:0', F'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
        rename_keys.append((F'block{b}_expand_bn/gamma:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
        rename_keys.append((F'block{b}_expand_bn/beta:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
        rename_keys.append(
            (F'block{b}_expand_bn/moving_mean:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
        rename_keys.append(
            (F'block{b}_expand_bn/moving_variance:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
        rename_keys.append(
            (F'block{b}_dwconv/depthwise_kernel:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
        rename_keys.append((F'block{b}_bn/gamma:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
        rename_keys.append((F'block{b}_bn/beta:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
        rename_keys.append(
            (F'block{b}_bn/moving_mean:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
        rename_keys.append(
            (F'block{b}_bn/moving_variance:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
        rename_keys.append((F'block{b}_se_reduce/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
        rename_keys.append((F'block{b}_se_reduce/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
        rename_keys.append((F'block{b}_se_expand/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
        rename_keys.append((F'block{b}_se_expand/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
        rename_keys.append(
            (F'block{b}_project_conv/kernel:0', F'encoder.blocks.{hf_b}.projection.project_conv.weight') )
        rename_keys.append((F'block{b}_project_bn/gamma:0', F'encoder.blocks.{hf_b}.projection.project_bn.weight') )
        rename_keys.append((F'block{b}_project_bn/beta:0', F'encoder.blocks.{hf_b}.projection.project_bn.bias') )
        rename_keys.append(
            (F'block{b}_project_bn/moving_mean:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
        rename_keys.append(
            (F'block{b}_project_bn/moving_variance:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
    # Top conv / batchnorm.
    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    # Fold into a dict, prefixing encoder names with "efficientnet." and mapping
    # the classification head.
    # NOTE(review): the subscript targets below (`key_mapping[...] = ...`) were
    # flattened to `A__ = ...` by the obfuscation; `key_mapping` is otherwise
    # unbound here — TODO restore from the upstream script.
    A__ = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            A__ = """efficientnet.""" + item[1]
    A__ = """classifier.weight"""
    A__ = """classifier.bias"""
    return key_mapping


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: List[Any] ) -> Union[str, Any]:
    """Copy TF parameter values into the HF state dict, transposing conv kernels.

    TF stores conv kernels HWIO and depthwise kernels HWOI; PyTorch wants OIHW,
    hence the permutes below. Dense kernels only need a plain transpose.
    """
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        A__ = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            A__ = torch.from_numpy(lowercase__ ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            A__ = torch.from_numpy(lowercase__ ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            A__ = torch.from_numpy(np.transpose(lowercase__ ) )
        else:
            A__ = torch.from_numpy(lowercase__ )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(lowercase__ )


@torch.no_grad()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: Optional[int] ) -> Optional[int]:
    """Convert one Keras EfficientNet to HF format and verify logits match.

    Params (obfuscated): model_name, pytorch_dump_folder_path, save_model,
    push_to_hub — presumably, per the __main__ call below; TODO confirm.
    """
    # Instantiate the pretrained Keras model with its ImageNet head.
    A__ = model_classes[model_name](
        include_top=lowercase__ ,
        weights="imagenet" ,
        input_tensor=lowercase__ ,
        input_shape=lowercase__ ,
        pooling=lowercase__ ,
        classes=1_0_0_0 ,
        classifier_activation="softmax" ,
    )
    A__ = original_model.trainable_variables
    A__ = original_model.non_trainable_variables
    A__ = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        A__ = param.numpy()
    A__ = list(tf_params.keys() )

    # Load HuggingFace model
    A__ = get_efficientnet_config(lowercase__ )
    A__ = EfficientNetForImageClassification(lowercase__ ).eval()
    A__ = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters..." )
    A__ = rename_keys(lowercase__ )
    replace_params(lowercase__ , lowercase__ , lowercase__ )

    # Initialize preprocessor and preprocess input image
    A__ = convert_image_processor(lowercase__ )
    A__ = preprocessor(images=prepare_img() , return_tensors="pt" )

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        A__ = hf_model(**lowercase__ )
    A__ = outputs.logits.detach().numpy()

    # Original model inference
    A__ = False
    A__ = CONFIG_MAP[model_name]["""image_size"""]
    A__ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    A__ = image.img_to_array(lowercase__ )
    A__ = np.expand_dims(lowercase__ , axis=0 )
    A__ = original_model.predict(lowercase__ )

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(lowercase__ , lowercase__ , atol=1e-3 ), "The predicted logits are not the same."
    print("Model outputs match!" )

    if save_model:
        # Create folder to save model
        if not os.path.isdir(lowercase__ ):
            os.mkdir(lowercase__ )
        # Save converted model and image processor
        hf_model.save_pretrained(lowercase__ )
        preprocessor.save_pretrained(lowercase__ )

    if push_to_hub:
        # Push model and image processor to hub
        print(F'Pushing converted {model_name} to the hub...' )
        A__ = F'efficientnet-{model_name}'
        preprocessor.push_to_hub(lowercase__ )
        hf_model.push_to_hub(lowercase__ )


if __name__ == "__main__":
    # NOTE(review): the parser/args targets were obfuscated to `lowerCAmelCase__`
    # while the reads use `parser` / `args` — broken as written.
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""b0""",
        type=str,
        help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""hf_model""",
        type=str,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")

    lowerCAmelCase__ = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
68
"""Convert original GLPN depth-estimation checkpoints to the HuggingFace format.

NOTE(review): identifiers in this chunk were obfuscated (defs -> `_snake_case`,
assignment targets -> `lowerCAmelCase_`/`__UpperCAmelCase`) while the reads keep
the original names (state_dict, new_state_dict, idx, config, model, ...).  The
code is kept byte-identical (reformatted only); comments describe intent.
"""
import argparse
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
# Module logger (target obfuscated; read later as `logger` — TODO confirm).
__UpperCAmelCase = logging.get_logger(__name__)


def _snake_case ( lowercase__ : Optional[Any] ) -> str:
    """Rename original GLPN state-dict keys to the HF naming scheme.

    Builds an OrderedDict (`new_state_dict`) by rewriting each key through a
    sequence of substring replacements; values are copied unchanged.
    """
    lowerCAmelCase_ :str = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("""module.encoder""" ):
            lowerCAmelCase_ :Union[str, Any] = key.replace("""module.encoder""" , """glpn.encoder""" )
        if key.startswith("""module.decoder""" ):
            lowerCAmelCase_ :Any = key.replace("""module.decoder""" , """decoder.stages""" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            lowerCAmelCase_ :List[str] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
            lowerCAmelCase_ :Tuple = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowercase__ )-1}""" )
        if "norm" in key:
            lowerCAmelCase_ :Dict = key.replace("""norm""" , """layer_norm""" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            lowerCAmelCase_ :str = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
            lowerCAmelCase_ :str = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowercase__ )-1}""" )
        if "layer_norm1" in key:
            lowerCAmelCase_ :Optional[Any] = key.replace("""layer_norm1""" , """layer_norm_1""" )
        if "layer_norm2" in key:
            lowerCAmelCase_ :str = key.replace("""layer_norm2""" , """layer_norm_2""" )
        if "block" in key:
            # replace for example block1 by block.0
            lowerCAmelCase_ :List[str] = key[key.find("""block""" ) + len("""block""" )]
            lowerCAmelCase_ :int = key.replace(f"""block{idx}""" , f"""block.{int(lowercase__ )-1}""" )
        if "attn.q" in key:
            lowerCAmelCase_ :Tuple = key.replace("""attn.q""" , """attention.self.query""" )
        if "attn.proj" in key:
            lowerCAmelCase_ :Optional[int] = key.replace("""attn.proj""" , """attention.output.dense""" )
        if "attn" in key:
            lowerCAmelCase_ :str = key.replace("""attn""" , """attention.self""" )
        if "fc1" in key:
            lowerCAmelCase_ :List[Any] = key.replace("""fc1""" , """dense1""" )
        if "fc2" in key:
            lowerCAmelCase_ :Optional[Any] = key.replace("""fc2""" , """dense2""" )
        if "linear_pred" in key:
            lowerCAmelCase_ :List[str] = key.replace("""linear_pred""" , """classifier""" )
        if "linear_fuse" in key:
            lowerCAmelCase_ :str = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
            lowerCAmelCase_ :Any = key.replace("""linear_fuse.bn""" , """batch_norm""" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            lowerCAmelCase_ :str = key[key.find("""linear_c""" ) + len("""linear_c""" )]
            lowerCAmelCase_ :Optional[int] = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowercase__ )-1}""" )
        if "bot_conv" in key:
            lowerCAmelCase_ :Union[str, Any] = key.replace("""bot_conv""" , """0.convolution""" )
        if "skip_conv1" in key:
            lowerCAmelCase_ :int = key.replace("""skip_conv1""" , """1.convolution""" )
        if "skip_conv2" in key:
            lowerCAmelCase_ :str = key.replace("""skip_conv2""" , """2.convolution""" )
        if "fusion1" in key:
            lowerCAmelCase_ :Any = key.replace("""fusion1""" , """1.fusion""" )
        if "fusion2" in key:
            lowerCAmelCase_ :List[str] = key.replace("""fusion2""" , """2.fusion""" )
        if "fusion3" in key:
            lowerCAmelCase_ :Dict = key.replace("""fusion3""" , """3.fusion""" )
        if "fusion" in key and "conv" in key:
            lowerCAmelCase_ :Any = key.replace("""conv""" , """convolutional_layer""" )
        if key.startswith("""module.last_layer_depth""" ):
            lowerCAmelCase_ :Tuple = key.replace("""module.last_layer_depth""" , """head.head""" )
        lowerCAmelCase_ :List[Any] = value
    return new_state_dict


def _snake_case ( lowercase__ : str , lowercase__ : int ) -> str:
    """Split each block's fused key/value ("kv") matrix into separate K and V entries."""
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix
            # in the original implementation)
            lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
            lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
            # next, add keys and values (in that order) to the state dict
            lowerCAmelCase_ :Optional[Any] = kv_weight[: config.hidden_sizes[i], :]
            lowerCAmelCase_ :Union[str, Any] = kv_bias[: config.hidden_sizes[i]]
            lowerCAmelCase_ :List[Any] = kv_weight[config.hidden_sizes[i] :, :]
            lowerCAmelCase_ :int = kv_bias[config.hidden_sizes[i] :]


def _snake_case ( ) -> Any:
    """Download the standard COCO cats image used for verification."""
    lowerCAmelCase_ :int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    lowerCAmelCase_ :Optional[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
    return image


@torch.no_grad()
def _snake_case ( lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Dict=False , lowercase__ : List[Any]=None ) -> int:
    """Convert a GLPN .pth checkpoint to HF format and verify the predicted depth.

    Params (obfuscated): checkpoint_path, pytorch_dump_folder_path, push_to_hub,
    model_name — presumably, per the __main__ call below; TODO confirm.
    """
    lowerCAmelCase_ :int = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] )

    # load image processor (only resize + rescale)
    lowerCAmelCase_ :Union[str, Any] = GLPNImageProcessor()

    # prepare image
    lowerCAmelCase_ :List[Any] = prepare_img()
    lowerCAmelCase_ :int = image_processor(images=lowercase__ , return_tensors="""pt""" ).pixel_values

    logger.info("""Converting model...""" )

    # load original state dict
    lowerCAmelCase_ :Tuple = torch.load(lowercase__ , map_location=torch.device("""cpu""" ) )

    # rename keys
    lowerCAmelCase_ :Union[str, Any] = rename_keys(lowercase__ )

    # key and value matrices need special treatment
    read_in_k_v(lowercase__ , lowercase__ )

    # create HuggingFace model and load state dict
    lowerCAmelCase_ :List[Any] = GLPNForDepthEstimation(lowercase__ )
    model.load_state_dict(lowercase__ )
    model.eval()

    # forward pass
    lowerCAmelCase_ :Dict = model(lowercase__ )
    lowerCAmelCase_ :Tuple = outputs.predicted_depth

    # verify output against hard-coded reference slices per released checkpoint
    if model_name is not None:
        if "nyu" in model_name:
            lowerCAmelCase_ :Optional[Any] = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
        elif "kitti" in model_name:
            lowerCAmelCase_ :Any = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
        else:
            raise ValueError(f"""Unknown model name: {model_name}""" )

        lowerCAmelCase_ :Union[str, Any] = torch.Size([1, 4_8_0, 6_4_0] )

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , lowercase__ , atol=1E-4 )
        print("""Looks ok!""" )

    # finally, push to hub if required
    if push_to_hub:
        logger.info("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(lowercase__ , lowercase__ ) ,
            organization="""nielsr""" ,
            commit_message="""Add model""" ,
            use_temp_dir=lowercase__ ,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(lowercase__ , lowercase__ ) ,
            organization="""nielsr""" ,
            commit_message="""Add image processor""" ,
            use_temp_dir=lowercase__ ,
        )


if __name__ == "__main__":
    # NOTE(review): parser/args targets obfuscated to `__UpperCAmelCase` while
    # the reads use `parser` / `args` — broken as written.
    __UpperCAmelCase = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_path',
        default=None,
        type=str,
        help='Path to the original PyTorch checkpoint (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
    )
    parser.add_argument(
        '--model_name',
        default='glpn-kitti',
        type=str,
        help='Name of the model in case you\'re pushing to the hub.',
    )
    __UpperCAmelCase = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
84
0
# TAPEX fine-tuning on TabFact (table fact verification).
#
# NOTE(review): identifiers were obfuscated — every dataclass field is named
# `A__` (later fields shadow earlier ones) and assignment targets became
# `UpperCAmelCase_`, while reads keep original names (dataset_name, train_file,
# ...).  Code is kept byte-identical (reformatted only); comments give the
# presumed original field names from the metadata help strings.
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional

import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    BartForSequenceClassification,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    TapexTokenizer,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')

require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')

UpperCamelCase_ = logging.getLogger(__name__)


@dataclass
class _snake_case:
    """Arguments controlling which data we train/evaluate on (DataTrainingArguments).

    NOTE(review): all fields obfuscated to `A__`; presumed originals noted inline.
    """

    # presumably dataset_name
    A__ : Optional[str] = field(
        default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    # presumably dataset_config_name
    A__ : Optional[str] = field(
        default="tab_fact" ,
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ,
    )
    # presumably max_seq_length
    A__ : int = field(
        default=1_024 ,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } ,
    )
    # presumably overwrite_cache (default was likely None/False before obfuscation)
    A__ : bool = field(
        default=A__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    # presumably pad_to_max_length
    A__ : bool = field(
        default=A__ ,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        } ,
    )
    # presumably max_train_samples
    A__ : Optional[int] = field(
        default=A__ ,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } ,
    )
    # presumably max_eval_samples
    A__ : Optional[int] = field(
        default=A__ ,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } ,
    )
    # presumably max_predict_samples
    A__ : Optional[int] = field(
        default=A__ ,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        } ,
    )
    # presumably train_file / validation_file / test_file
    A__ : Optional[str] = field(
        default=A__ , metadata={"help": "A csv or a json file containing the training data."}
    )
    A__ : Optional[str] = field(
        default=A__ , metadata={"help": "A csv or a json file containing the validation data."}
    )
    A__ : Optional[str] = field(default=A__ , metadata={"help": "A csv or a json file containing the test data."} )

    def A__ ( self: Any ) -> int:
        """Validate the data arguments (presumably __post_init__): either a dataset
        name, or both train and validation files with matching csv/json extensions."""
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
        else:
            UpperCAmelCase_ : Optional[int] = self.train_file.split(""".""" )[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            UpperCAmelCase_ : List[Any] = self.validation_file.split(""".""" )[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class _snake_case:
    """Arguments for model/config/tokenizer selection (presumably ModelArguments).

    NOTE(review): all fields obfuscated to `A__`; presumed originals noted inline.
    """

    # presumably model_name_or_path
    A__ : str = field(
        default=A__ , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    # presumably config_name
    A__ : Optional[str] = field(
        default=A__ , metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    # presumably tokenizer_name
    A__ : Optional[str] = field(
        default=A__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    # presumably cache_dir
    A__ : Optional[str] = field(
        default=A__ ,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,
    )
    # presumably use_fast_tokenizer
    A__ : bool = field(
        default=A__ ,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} ,
    )
    # presumably model_revision
    A__ : str = field(
        default="main" ,
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} ,
    )
    # presumably use_auth_token
    A__ : bool = field(
        default=A__ ,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } ,
    )


def lowerCamelCase_ ( ):
    """Fine-tune BART/TAPEX on TabFact: parse args, load data, train/eval/predict.

    NOTE(review): presumably the original `main()`; assignment targets were
    obfuscated to `UpperCAmelCase_` while reads keep the real names.
    """
    UpperCAmelCase_ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        UpperCAmelCase_ : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        UpperCAmelCase_ : Tuple = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,
        datefmt="""%m/%d/%Y %H:%M:%S""" ,
        handlers=[logging.StreamHandler(sys.stdout )] ,
    )

    UpperCAmelCase_ : Dict = training_args.get_process_log_level()
    logger.setLevel(lowercase__ )
    datasets.utils.logging.set_verbosity(lowercase__ )
    transformers.utils.logging.set_verbosity(lowercase__ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}'''
    )
    logger.info(f'''Training/evaluation parameters {training_args}''' )

    # Detecting last checkpoint.
    UpperCAmelCase_ : Optional[Any] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        UpperCAmelCase_ : int = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        UpperCAmelCase_ : Any = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        UpperCAmelCase_ : Optional[Any] = {"""train""": data_args.train_file, """validation""": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                UpperCAmelCase_ : Any = data_args.train_file.split(""".""" )[-1]
                UpperCAmelCase_ : str = data_args.test_file.split(""".""" )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                UpperCAmelCase_ : List[str] = data_args.test_file
            else:
                raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )

        for key in data_files.keys():
            logger.info(f'''load a local file for {key}: {data_files[key]}''' )

        if data_args.train_file.endswith(""".csv""" ):
            # Loading a dataset from local csv files
            UpperCAmelCase_ : List[str] = load_dataset("""csv""" , data_files=lowercase__ , cache_dir=model_args.cache_dir )
        else:
            # Loading a dataset from local json files
            UpperCAmelCase_ : int = load_dataset("""json""" , data_files=lowercase__ , cache_dir=model_args.cache_dir )
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    UpperCAmelCase_ : Dict = raw_datasets["""train"""].features["""label"""].names
    UpperCAmelCase_ : Any = len(lowercase__ )

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,
        num_labels=lowercase__ ,
        cache_dir=model_args.cache_dir ,
        revision=model_args.model_revision ,
        use_auth_token=True if model_args.use_auth_token else None ,
    )
    # load tapex tokenizer
    UpperCAmelCase_ : Any = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir ,
        use_fast=model_args.use_fast_tokenizer ,
        revision=model_args.model_revision ,
        use_auth_token=True if model_args.use_auth_token else None ,
        add_prefix_space=lowercase__ ,
    )
    UpperCAmelCase_ : Optional[int] = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path ,
        from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,
        config=lowercase__ ,
        cache_dir=model_args.cache_dir ,
        revision=model_args.model_revision ,
        use_auth_token=True if model_args.use_auth_token else None ,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        UpperCAmelCase_ : Tuple = """max_length"""
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        UpperCAmelCase_ : List[str] = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    UpperCAmelCase_ : Union[str, Any] = {"""Refused""": 0, """Entailed""": 1}
    UpperCAmelCase_ : List[str] = {0: """Refused""", 1: """Entailed"""}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
            f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
    UpperCAmelCase_ : List[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )

    def preprocess_tabfact_function(_a : List[Any] ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_a : Dict ):
            # TabFact serializes tables with '#' column separators and newline rows.
            UpperCAmelCase_ : List[str] = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
            UpperCAmelCase_ : Optional[int] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd

        UpperCAmelCase_ : List[Any] = examples["""statement"""]
        UpperCAmelCase_ : str = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
        UpperCAmelCase_ : List[str] = tokenizer(lowercase__ , lowercase__ , padding=lowercase__ , max_length=lowercase__ , truncation=lowercase__ )
        UpperCAmelCase_ : str = examples["""label"""]
        return result

    with training_args.main_process_first(desc="""dataset map pre-processing""" ):
        UpperCAmelCase_ : Union[str, Any] = raw_datasets.map(
            lowercase__ ,
            batched=lowercase__ ,
            load_from_cache_file=not data_args.overwrite_cache ,
            desc="""Running tokenizer on dataset""" ,
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("""--do_train requires a train dataset""" )
        UpperCAmelCase_ : Optional[Any] = raw_datasets["""train"""]
        if data_args.max_train_samples is not None:
            UpperCAmelCase_ : Any = train_dataset.select(range(data_args.max_train_samples ) )
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("""--do_eval requires a validation dataset""" )
        UpperCAmelCase_ : List[Any] = raw_datasets["""validation"""]
        if data_args.max_eval_samples is not None:
            UpperCAmelCase_ : Any = eval_dataset.select(range(data_args.max_eval_samples ) )
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("""--do_predict requires a test dataset""" )
        UpperCAmelCase_ : Optional[Any] = raw_datasets["""test"""]
        if data_args.max_predict_samples is not None:
            UpperCAmelCase_ : Any = predict_dataset.select(range(data_args.max_predict_samples ) )

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(lowercase__ ) ) , 3 ):
            logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(_a : EvalPrediction ):
        UpperCAmelCase_ : Optional[Any] = p.predictions[0] if isinstance(p.predictions , lowercase__ ) else p.predictions
        UpperCAmelCase_ : Optional[Any] = np.argmax(lowercase__ , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        UpperCAmelCase_ : List[str] = default_data_collator
    elif training_args.fpaa:
        # pad_to_multiple_of=8 keeps tensor cores happy under fp16.
        UpperCAmelCase_ : Union[str, Any] = DataCollatorWithPadding(lowercase__ , pad_to_multiple_of=8 )
    else:
        UpperCAmelCase_ : Any = None

    # Initialize our Trainer
    UpperCAmelCase_ : Optional[int] = Trainer(
        model=lowercase__ ,
        args=lowercase__ ,
        train_dataset=train_dataset if training_args.do_train else None ,
        eval_dataset=eval_dataset if training_args.do_eval else None ,
        compute_metrics=lowercase__ ,
        tokenizer=lowercase__ ,
        data_collator=lowercase__ ,
    )

    # Training
    if training_args.do_train:
        UpperCAmelCase_ : Optional[int] = None
        if training_args.resume_from_checkpoint is not None:
            UpperCAmelCase_ : List[str] = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            UpperCAmelCase_ : str = last_checkpoint
        UpperCAmelCase_ : Any = trainer.train(resume_from_checkpoint=lowercase__ )
        UpperCAmelCase_ : List[Any] = train_result.metrics
        UpperCAmelCase_ : List[str] = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase__ )
        )
        UpperCAmelCase_ : Union[str, Any] = min(lowercase__ , len(lowercase__ ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("""train""" , lowercase__ )
        trainer.save_metrics("""train""" , lowercase__ )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        UpperCAmelCase_ : List[Any] = trainer.evaluate(eval_dataset=lowercase__ )
        UpperCAmelCase_ : List[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase__ )
        UpperCAmelCase_ : Dict = min(lowercase__ , len(lowercase__ ) )
        trainer.log_metrics("""eval""" , lowercase__ )
        trainer.save_metrics("""eval""" , lowercase__ )

    if training_args.do_predict:
        logger.info("""*** Predict ***""" )
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        UpperCAmelCase_ : str = predict_dataset.remove_columns("""label""" )
        UpperCAmelCase_ : List[str] = trainer.predict(lowercase__ , metric_key_prefix="""predict""" ).predictions
        UpperCAmelCase_ : Optional[int] = np.argmax(lowercase__ , axis=1 )
        UpperCAmelCase_ : Optional[int] = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
        if trainer.is_world_process_zero():
            with open(lowercase__ , """w""" ) as writer:
                logger.info("""***** Predict Results *****""" )
                writer.write("""index\tprediction\n""" )
                for index, item in enumerate(lowercase__ ):
                    UpperCAmelCase_ : Optional[int] = label_list[item]
                    writer.write(f'''{index}\t{item}\n''' )

    UpperCAmelCase_ : Any = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
    if training_args.push_to_hub:
        trainer.push_to_hub(**lowercase__ )
    else:
        trainer.create_model_card(**lowercase__ )


def lowerCamelCase_ ( _a : Tuple ):
    """Entry point for xla_spawn (TPUs): ignores its index argument and runs main().

    NOTE(review): `main` is undefined here — the obfuscation renamed the real
    main() to `lowerCamelCase_` above; broken as written.
    """
    main()


if __name__ == "__main__":
    main()
345
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
84
0
"""Build a custom knowledge dataset for RAG.

Pipeline: split (title, text) documents into ~100-word passages, embed each
passage with a DPR context encoder, save the dataset, then index the
embeddings with a Faiss HNSW index.
"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split ``text`` into chunks of ``n`` words (words delimited by ``character``)."""
    words = text.split(character)
    return [character.join(words[i : i + n]).strip() for i in range(0, len(words), n)]


def split_documents(documents: dict) -> dict:
    """Split each (title, text) document into 100-word passages.

    BUG FIX: the tuple ``[], []`` was previously assigned to a single throwaway
    name, so the ``titles`` / ``texts`` accumulators used below were undefined.
    """
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute DPR embeddings (pooler output) of the passages as numpy arrays."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
) -> None:
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage

    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation:
    # https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    # BUG FIX: the mapped callables below previously referenced undefined
    # obfuscated names instead of `split_documents` / `embed`.
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    # BUG FIX: all three dataclasses previously shared one obfuscated class name,
    # while the argument parser at the bottom referenced these original names.
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        # Fall back to a temporary output directory when none was supplied.
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
309
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json', # See all LeViT models at https://huggingface.co/models?filter=levit } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "levit" def __init__( self , __A=224 , __A=3 , __A=3 , __A=2 , __A=1 , __A=16 , __A=[128, 256, 384] , __A=[4, 8, 12] , __A=[4, 4, 4] , __A=[16, 16, 16] , __A=0 , __A=[2, 2, 2] , __A=[2, 2, 2] , __A=0.0_2 , **__A , ) -> Any: super().__init__(**__A ) lowerCAmelCase_ :Tuple = image_size lowerCAmelCase_ :Optional[int] = num_channels lowerCAmelCase_ :Union[str, Any] = kernel_size lowerCAmelCase_ :Optional[Any] = stride lowerCAmelCase_ :Optional[int] = padding lowerCAmelCase_ :Optional[Any] = hidden_sizes lowerCAmelCase_ :Optional[int] = num_attention_heads lowerCAmelCase_ :int = depths lowerCAmelCase_ :List[str] = key_dim lowerCAmelCase_ :str = drop_path_rate lowerCAmelCase_ :Optional[int] = patch_size lowerCAmelCase_ :Union[str, Any] = attention_ratio lowerCAmelCase_ :Dict = mlp_ratio lowerCAmelCase_ :Any = initializer_range lowerCAmelCase_ :Optional[int] = [ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Tuple = version.parse("1.11" ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-4
84
0
# NOTE(review): this test module was flattened onto a handful of physical lines by an
# earlier transformation; the code below is left byte-identical and only full-line
# comments are added at statement-safe boundaries.
# Contents: a `LayoutLMvaImageProcessingTester` helper class plus a test case for
# `LayoutLMvaImageProcessor` covering PIL / numpy / torch inputs and a Tesseract
# OCR integration check against the `hf-internal-testing/fixtures_docvqa` sample.
# NOTE(review): assignments such as `UpperCAmelCase : int = parent` appear to be
# mangled forms of `self.parent = parent` — the attributes read later (e.g.
# `self.image_processor_tester`, `self.do_resize`) are otherwise never set; confirm
# against the upstream test before relying on this copy.
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class A_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Dict , lowercase_ : List[str] , lowercase_ : int=7 , lowercase_ : int=3 , lowercase_ : List[Any]=18 , lowercase_ : Dict=30 , lowercase_ : Dict=400 , lowercase_ : List[Any]=True , lowercase_ : List[str]=None , lowercase_ : List[str]=True , ) -> Optional[Any]: UpperCAmelCase : int = size if size is not None else {"""height""": 18, """width""": 18} UpperCAmelCase : int = parent UpperCAmelCase : List[Any] = batch_size UpperCAmelCase : Optional[Any] = num_channels UpperCAmelCase : Optional[Any] = image_size UpperCAmelCase : int = min_resolution UpperCAmelCase : List[Any] = max_resolution UpperCAmelCase : List[Any] = do_resize UpperCAmelCase : Tuple = size UpperCAmelCase : Dict = apply_ocr def UpperCAmelCase_ ( self : List[str] ) -> List[str]: return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class A_ ( A__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None def UpperCAmelCase_ ( self : Tuple ) -> str: UpperCAmelCase : Tuple = LayoutLMvaImageProcessingTester(self ) @property def UpperCAmelCase_ ( self : List[str] ) -> int: return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self : List[str] ) -> str: UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , 'do_resize' ) ) self.assertTrue(hasattr(__A , 'size' ) ) 
# (continued) config property round-trips, then PIL / numpy / torch batching tests; the
# long literal lists further down are Tesseract 4.1.1 reference words and boxes for the
# DocVQA fixture page, compared exactly against the processor's OCR output.
self.assertTrue(hasattr(__A , 'apply_ocr' ) ) def UpperCAmelCase_ ( self : str ) -> int: UpperCAmelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 18} ) UpperCAmelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple: pass def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: # Initialize image_processing UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input UpperCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , __A ) self.assertIsInstance(encoding.boxes , __A ) # Test batched UpperCAmelCase : str = image_processing(__A , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def UpperCAmelCase_ ( self : Optional[int] ) -> str: # Initialize image_processing UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) # Test not batched input UpperCAmelCase : 
Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched UpperCAmelCase : Dict = image_processing(__A , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]: # Initialize image_processing UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input UpperCAmelCase : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched UpperCAmelCase : int = image_processing(__A , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]: # with apply_OCR = True UpperCAmelCase : str = LayoutLMvaImageProcessor() from datasets import load_dataset UpperCAmelCase : int = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' ) UpperCAmelCase : Optional[Any] = Image.open(ds[0]['file'] ).convert('RGB' ) UpperCAmelCase : Optional[Any] = image_processing(__A , 
return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 UpperCAmelCase : List[Any] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", 
"""School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 UpperCAmelCase : Optional[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 
574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 
714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __A ) self.assertListEqual(encoding.boxes , __A ) # with apply_OCR = False UpperCAmelCase : int = LayoutLMvaImageProcessor(apply_ocr=__A ) UpperCAmelCase : int = image_processing(__A , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
151
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def _snake_case ( lowercase__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Union[str, Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Dict = 0.01 with locka.acquire(): with pytest.raises(lowercase__ ): lowerCAmelCase_ :List[Any] = time.time() locka.acquire(lowercase__ ) assert time.time() - _start > timeout def _snake_case ( lowercase__ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = """a""" * 1_0_0_0 + """.lock""" lowerCAmelCase_ :Optional[Any] = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(""".lock""" ) assert not locka._lock_file.endswith(lowercase__ ) assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5 lowerCAmelCase_ :Any = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(lowercase__ ): locka.acquire(0 )
84
0
"""Backward-compatibility shim: the inpainting pipeline now lives in `diffusers` proper."""
import warnings

# Re-export keeps old `from inpainting import StableDiffusionInpaintPipeline`
# call sites working while steering users toward the canonical import path.
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401

warnings.warn(
    'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
    ' StableDiffusionInpaintPipeline` instead.'
)
137
"""simple docstring""" from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __UpperCAmelCase = 1.054571817e-34 # unit of ℏ : J * s __UpperCAmelCase = 3e8 # unit of c : m * s^-1 def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> dict[str, float]: '''simple docstring''' if (force, area, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if force < 0: raise ValueError("""Magnitude of force can not be negative""" ) if distance < 0: raise ValueError("""Distance can not be negative""" ) if area < 0: raise ValueError("""Area can not be negative""" ) if force == 0: lowerCAmelCase_ :Union[str, Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_4_0 * (distance) ** 4 ) return {"force": force} elif area == 0: lowerCAmelCase_ :Optional[Any] = (2_4_0 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: lowerCAmelCase_ :Any = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("""One and only one argument must be 0""" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
84
0
def is_isogram(string: str) -> bool:
    """Return True if ``string`` contains no repeated letters (case-insensitive).

    Raises:
        ValueError: if ``string`` contains any non-alphabetic character.
    """
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    # BUG FIX: the comparison previously referenced a module-level name defined
    # only under the __main__ guard, so calling the function raised NameError;
    # compare the local lower-cased letters instead (sorting is unnecessary for
    # the length-vs-set check and has been dropped).
    letters = string.lower()
    return len(letters) == len(set(letters))


# Backward-compatible alias for the previous (mangled) public name.
SCREAMING_SNAKE_CASE_ = is_isogram


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
338
"""simple docstring""" def _snake_case ( lowercase__ : str , lowercase__ : str ) -> int: '''simple docstring''' if len(lowercase__ ) != len(lowercase__ ): raise ValueError("""String lengths must match!""" ) lowerCAmelCase_ :Optional[int] = 0 for chara, chara in zip(lowercase__ , lowercase__ ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
84
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''', # See all LeViT models at https://huggingface.co/models?filter=levit } class __A( A__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = "levit" def __init__(self , SCREAMING_SNAKE_CASE_=2_24 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=[1_28, 2_56, 3_84] , SCREAMING_SNAKE_CASE_=[4, 8, 12] , SCREAMING_SNAKE_CASE_=[4, 4, 4] , SCREAMING_SNAKE_CASE_=[16, 16, 16] , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=[2, 2, 2] , SCREAMING_SNAKE_CASE_=[2, 2, 2] , SCREAMING_SNAKE_CASE_=0.02 , **SCREAMING_SNAKE_CASE_ , ): super().__init__(**__A ) UpperCamelCase__ = image_size UpperCamelCase__ = num_channels UpperCamelCase__ = kernel_size UpperCamelCase__ = stride UpperCamelCase__ = padding UpperCamelCase__ = hidden_sizes UpperCamelCase__ = num_attention_heads UpperCamelCase__ = depths UpperCamelCase__ = key_dim UpperCamelCase__ = drop_path_rate UpperCamelCase__ = patch_size UpperCamelCase__ = attention_ratio UpperCamelCase__ = mlp_ratio UpperCamelCase__ = initializer_range UpperCamelCase__ = [ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class __A( A__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = version.parse("""1.11""" ) @property def UpperCAmelCase_ (self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def UpperCAmelCase_ (self ): return 1E-4
244
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A ) -> Optional[Any]: super().__init__() lowerCAmelCase_ :int = nn.ModuleList(__A ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A = None , __A = None , __A = None , __A = None , __A = False , __A = True , ) -> Union[ControlNetOutput, Tuple]: for i, (image, scale, controlnet) in enumerate(zip(__A , __A , self.nets ) ): lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = controlnet( __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) # merge samples if i == 0: lowerCAmelCase_ , lowerCAmelCase_ :Tuple = down_samples, mid_sample else: lowerCAmelCase_ :str = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__A , __A ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __lowerCAmelCase ( self , __A , __A = True , __A = None , __A = False , __A = None , ) -> Optional[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = save_directory for controlnet in self.nets: controlnet.save_pretrained( __A , is_main_process=__A , save_function=__A , safe_serialization=__A , variant=__A , ) idx += 1 lowerCAmelCase_ :Any = model_path_to_save + f"""_{idx}""" @classmethod def __lowerCAmelCase ( cls , __A , **__A ) -> List[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... 
lowerCAmelCase_ :List[Any] = pretrained_model_path while os.path.isdir(__A ): lowerCAmelCase_ :Tuple = ControlNetModel.from_pretrained(__A , **__A ) controlnets.append(__A ) idx += 1 lowerCAmelCase_ :Dict = pretrained_model_path + f"""_{idx}""" logger.info(f"""{len(__A )} controlnets loaded from {pretrained_model_path}.""" ) if len(__A ) == 0: raise ValueError( f"""No ControlNets found under {os.path.dirname(__A )}. Expected at least {pretrained_model_path + "_0"}.""" ) return cls(__A )
84
0
'''simple docstring''' import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class a__( A__ ): '''simple docstring''' def a_ ( self): """simple docstring""" lowerCAmelCase = pa.array(TypedSequence([1, 2, 3])) self.assertEqual(arr.type , pa.intaa()) def a_ ( self): """simple docstring""" with self.assertRaises(__A): lowerCAmelCase = pa.array(TypedSequence([1, 2, 3]) , type=pa.intaa()) def a_ ( self): """simple docstring""" with self.assertRaises(__A): lowerCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""") , type=Value("""int64"""))) def a_ ( self): """simple docstring""" lowerCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32"""))) self.assertEqual(arr.type , pa.intaa()) def a_ ( self): """simple docstring""" with self.assertRaises((TypeError, pa.lib.ArrowInvalid)): lowerCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64"""))) def a_ ( self): """simple docstring""" lowerCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32"""))) self.assertEqual(arr.type , pa.intaa()) def a_ ( self): """simple docstring""" lowerCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64"""))) self.assertEqual(arr.type , pa.string()) def a_ ( self): """simple docstring""" lowerCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64"""))) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""")) def a_ ( self): """simple docstring""" with 
self.assertRaises((TypeError, pa.lib.ArrowInvalid)): lowerCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64"""))) def a_ ( self): """simple docstring""" lowerCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64"""))) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""")) def a_ ( self): """simple docstring""" lowerCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64"""))) self.assertEqual(arr.type , pa.string()) @require_pil def a_ ( self): """simple docstring""" import PIL.Image lowerCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta).reshape(2 , 5)) with patch( """datasets.arrow_writer.cast_to_python_objects""" , side_effect=__A) as mock_cast_to_python_objects: lowerCAmelCase = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image())) lowerCAmelCase = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("""optimize_list_casting""" , __A) self.assertFalse(kwargs["""optimize_list_casting"""]) def snake_case__ ( _A: Optional[Any] , _A: int ) -> Optional[int]: '''simple docstring''' lowerCAmelCase = pa.BufferReader(lowercase__ ) if isinstance(lowercase__ , pa.Buffer ) else pa.memory_map(lowercase__ ) lowerCAmelCase = pa.ipc.open_stream(lowercase__ ) lowerCAmelCase = f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def snake_case__ ( _A: int , _A: str ) -> Any: '''simple docstring''' lowerCAmelCase = pa.BufferOutputStream() lowerCAmelCase = pa.schema(lowercase__ ) if fields else None with ArrowWriter(stream=lowercase__ , schema=lowercase__ , 
writer_batch_size=lowercase__ ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) lowerCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: lowerCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def snake_case__ ( ) -> Optional[int]: '''simple docstring''' lowerCAmelCase = pa.BufferOutputStream() lowerCAmelCase = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} ) with ArrowWriter(stream=lowercase__ , features=lowercase__ ) as writer: writer.write({"""labels""": 0} ) writer.write({"""labels""": 1} ) lowerCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata lowerCAmelCase = pa.BufferReader(output.getvalue() ) lowerCAmelCase = pa.ipc.open_stream(lowercase__ ) lowerCAmelCase = f.read_all() lowerCAmelCase = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(lowercase__ ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) def snake_case__ ( _A: Any ) -> str: '''simple docstring''' lowerCAmelCase = pa.BufferOutputStream() with ArrowWriter( stream=lowercase__ , writer_batch_size=lowercase__ , hash_salt="""split_name""" , check_duplicates=lowercase__ , ) as writer: with pytest.raises(lowercase__ ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] ) lowerCAmelCase = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def snake_case__ ( _A: Dict ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase = 
pa.BufferOutputStream() with ArrowWriter( stream=lowercase__ , writer_batch_size=lowercase__ , hash_salt="""split_name""" , check_duplicates=lowercase__ , ) as writer: with pytest.raises(lowercase__ ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 ) lowerCAmelCase = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def snake_case__ ( _A: Tuple ) -> Tuple: '''simple docstring''' lowerCAmelCase = pa.BufferOutputStream() with ArrowWriter( stream=lowercase__ , writer_batch_size=lowercase__ , hash_salt="""split_name""" , check_duplicates=lowercase__ , ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 ) lowerCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def snake_case__ ( _A: List[str] , _A: List[Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase = pa.BufferOutputStream() lowerCAmelCase = pa.schema(lowercase__ ) if fields else None with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) writer.write_batch({"""col_1""": [], """col_2""": []} ) lowerCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: lowerCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) 
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def snake_case__ ( _A: str , _A: Optional[Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase = pa.BufferOutputStream() lowerCAmelCase = pa.schema(lowercase__ ) if fields else None with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer: writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) ) lowerCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: lowerCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def snake_case__ ( _A: Optional[int] , _A: Optional[Any] ) -> Optional[int]: '''simple docstring''' lowerCAmelCase = pa.BufferOutputStream() lowerCAmelCase = pa.schema(lowercase__ ) if fields else None with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer: writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) ) writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) ) lowerCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: lowerCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if 
writer_batch_size == 1 else 1 ) def snake_case__ ( ) -> Union[str, Any]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()} lowerCAmelCase = os.path.join(lowercase__ , """test.arrow""" ) with ArrowWriter(path=lowercase__ , schema=pa.schema(lowercase__ ) ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) lowerCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata ) _check_output(lowercase__ , 1 ) def snake_case__ ( _A: Optional[int] ) -> int: '''simple docstring''' if pa.types.is_list(lowercase__ ): return get_base_dtype(arr_type.value_type ) else: return arr_type def snake_case__ ( _A: Optional[int] , _A: Optional[Any] ) -> Optional[Any]: '''simple docstring''' if isinstance(lst[0] , lowercase__ ): change_first_primitive_element_in_list(lst[0] , lowercase__ ) else: lowerCAmelCase = value @pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def snake_case__ ( _A: Dict , _A: Tuple , _A: Dict ) -> List[str]: '''simple docstring''' lowerCAmelCase = pa.array(TypedSequence(lowercase__ , optimized_int_type=lowercase__ ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( """col, expected_dtype""" , [ ("""attention_mask""", pa.inta()), ("""special_tokens_mask""", pa.inta()), ("""token_type_ids""", pa.inta()), ("""input_ids""", pa.intaa()), ("""other""", pa.intaa()), ] , ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def snake_case__ ( _A: int , _A: List[str] , _A: int ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase = pa.array(OptimizedTypedSequence(lowercase__ , col=lowercase__ ) ) assert get_base_dtype(arr.type ) 
== expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications lowerCAmelCase = copy.deepcopy(lowercase__ ) lowerCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(lowercase__ , lowercase__ ) lowerCAmelCase = pa.array(OptimizedTypedSequence(lowercase__ , col=lowercase__ ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize("""raise_exception""" , [False, True] ) def snake_case__ ( _A: int , _A: Tuple ) -> Optional[int]: '''simple docstring''' lowerCAmelCase = str(tmp_path / """dataset-train.arrow""" ) try: with ArrowWriter(path=lowercase__ ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def snake_case__ ( _A: Optional[Any] ) -> int: '''simple docstring''' lowerCAmelCase = """mock://dataset-train.arrow""" with ArrowWriter(path=lowercase__ , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(lowercase__ ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) lowerCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(lowercase__ ) def snake_case__ ( ) -> Dict: '''simple docstring''' lowerCAmelCase = pa.BufferOutputStream() with ParquetWriter(stream=lowercase__ ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) lowerCAmelCase = writer.finalize() assert num_examples == 2 assert num_bytes > 0 lowerCAmelCase = pa.BufferReader(output.getvalue() ) lowerCAmelCase = pq.read_table(lowercase__ ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("""embed_local_files""" , [False, True] ) def snake_case__ ( _A: str , _A: List[Any] ) -> 
List[Any]: '''simple docstring''' import PIL.Image lowerCAmelCase = str(tmp_path / """test_image_rgb.jpg""" ) PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(lowercase__ , format="""png""" ) lowerCAmelCase = pa.BufferOutputStream() with ParquetWriter( stream=lowercase__ , features=Features({"""image""": Image()} ) , embed_local_files=lowercase__ ) as writer: writer.write({"""image""": image_path} ) writer.finalize() lowerCAmelCase = pa.BufferReader(output.getvalue() ) lowerCAmelCase = pq.read_table(lowercase__ ) lowerCAmelCase = pa_table.to_pydict() if embed_local_files: assert isinstance(out["""image"""][0]["""path"""] , lowercase__ ) with open(lowercase__ , """rb""" ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def snake_case__ ( ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase = pa.schema([pa.field("""col_1""" , pa.string() , nullable=lowercase__ )] ) lowerCAmelCase = pa.BufferOutputStream() with ArrowWriter(stream=lowercase__ ) as writer: writer._build_writer(inferred_schema=lowercase__ ) assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
272
"""simple docstring""" from PIL import Image def _snake_case ( lowercase__ : Image , lowercase__ : float ) -> Image: '''simple docstring''' def brightness(lowercase__ : int ) -> float: return 1_2_8 + level + (c - 1_2_8) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(lowercase__ ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 __UpperCAmelCase = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
84
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _SCREAMING_SNAKE_CASE : str = { "configuration_bridgetower": [ "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP", "BridgeTowerConfig", "BridgeTowerTextConfig", "BridgeTowerVisionConfig", ], "processing_bridgetower": ["BridgeTowerProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : Any = ["BridgeTowerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : Any = [ "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST", "BridgeTowerForContrastiveLearning", "BridgeTowerForImageAndTextRetrieval", "BridgeTowerForMaskedLM", "BridgeTowerModel", "BridgeTowerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure)
127
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class _SCREAMING_SNAKE_CASE : def __lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) lowerCAmelCase_ :int = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :List[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :int = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } 
def __lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ :Dict = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[int] = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Dict = 
self.get_dummy_components() lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Optional[int] = inputs["""prompt"""] lowerCAmelCase_ :Optional[int] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Optional[int] = inputs["""output_type"""] if "image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""image"""] else: lowerCAmelCase_ :int = None if "mask_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""mask_image"""] else: lowerCAmelCase_ :int = None if "original_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""original_image"""] else: lowerCAmelCase_ :List[Any] = None lowerCAmelCase_ , lowerCAmelCase_ :int = pipe.encode_prompt(__A ) # inputs with prompt converted to embeddings lowerCAmelCase_ :List[str] = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :int = image if mask_image is not None: lowerCAmelCase_ :Tuple = mask_image if original_image is not None: lowerCAmelCase_ :Optional[Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(__A , __A , __A ) lowerCAmelCase_ :Optional[int] = pipe(**__A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Optional[int] = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(__A , __A ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) lowerCAmelCase_ :Dict = self.get_dummy_inputs(__A ) 
lowerCAmelCase_ :Union[str, Any] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Tuple = inputs["""output_type"""] # inputs with prompt converted to embeddings lowerCAmelCase_ :Tuple = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :Optional[int] = image if mask_image is not None: lowerCAmelCase_ :str = mask_image if original_image is not None: lowerCAmelCase_ :Tuple = original_image lowerCAmelCase_ :Union[str, Any] = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Any = self.get_dummy_components() lowerCAmelCase_ :Optional[int] = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[int] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Dict = pipe(**__A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Any = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests lowerCAmelCase_ :List[Any] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :str = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 )
84
0
"""simple docstring""" import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="session" ) def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = 10 __lowerCAmelCase = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string" ) ), "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ), "answers": datasets.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), "id": datasets.Value("int64" ), } ) __lowerCAmelCase = datasets.Dataset.from_dict( { "tokens": [["foo"] * 5] * n, "labels": [[1] * 5] * n, "answers": [{"answer_start": [97], "text": ["1976"]}] * 10, "id": list(range(lowercase__ ) ), } , features=lowercase__ , ) return dataset @pytest.fixture(scope="session" ) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = str(tmp_path_factory.mktemp("data" ) / "file.arrow" ) dataset.map(cache_file_name=lowercase__ ) return filename # FILE_CONTENT + files A : Optional[int] = "\\n Text data.\n Second line of data." 
# NOTE(review): mangled fixture block (see note at top of file) — every fixture
# is named `_lowerCamelCase`, so each later definition shadows the previous one,
# and the bodies reference unbound names (`tmp_path_factory`, `path`, `data`,
# `filename`, `csv_path`, `csva_path`, ...).  Comments below record the evident
# intent of each fixture; restore real names from upstream before use.


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # Plain text file containing FILE_CONTENT.
    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """file.txt"""
    __lowerCAmelCase = FILE_CONTENT
    with open(lowercase__, "w") as f:
        f.write(lowercase__)
    return filename


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # bz2-compressed copy of the text; `bza` is presumably mangled `bz2` — confirm.
    import bza

    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """file.txt.bz2"""
    __lowerCAmelCase = bytes(lowercase__, "utf-8")
    with bza.open(lowercase__, "wb") as f:
        f.write(lowercase__)
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # gzip-compressed copy of the text.
    import gzip

    __lowerCAmelCase = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    __lowerCAmelCase = bytes(lowercase__, "utf-8")
    with gzip.open(lowercase__, "wb") as f:
        f.write(lowercase__)
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # lz4-compressed copy, only when the optional lz4 dependency is present.
    if datasets.config.LZ4_AVAILABLE:
        import lza.frame  # NOTE(review): presumably `lz4.frame` — confirm

        __lowerCAmelCase = tmp_path_factory.mktemp("data") / """file.txt.lz4"""
        __lowerCAmelCase = bytes(lowercase__, "utf-8")
        with lza.frame.open(lowercase__, "wb") as f:
            f.write(lowercase__)
        return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase):
    # 7z archive wrapping the text file, only when py7zr is installed.
    if datasets.config.PY7ZR_AVAILABLE:
        import pyazr  # NOTE(review): presumably `py7zr` — confirm

        __lowerCAmelCase = tmp_path_factory.mktemp("data") / """file.txt.7z"""
        with pyazr.SevenZipFile(lowercase__, "w") as archive:
            archive.write(lowercase__, arcname=os.path.basename(lowercase__))
        return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase):
    # Uncompressed tar archive containing the text file.
    import tarfile

    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """file.txt.tar"""
    with tarfile.TarFile(lowercase__, "w") as f:
        f.add(lowercase__, arcname=os.path.basename(lowercase__))
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # xz (LZMA) compressed copy of the text.
    import lzma

    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """file.txt.xz"""
    __lowerCAmelCase = bytes(lowercase__, "utf-8")
    with lzma.open(lowercase__, "wb") as f:
        f.write(lowercase__)
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase):
    # Zip archive containing the text file.
    import zipfile

    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """file.txt.zip"""
    with zipfile.ZipFile(lowercase__, "w") as f:
        f.write(lowercase__, arcname=os.path.basename(lowercase__))
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # zstd-compressed copy, only when zstandard is installed.
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        __lowerCAmelCase = tmp_path_factory.mktemp("data") / """file.txt.zst"""
        __lowerCAmelCase = bytes(lowercase__, "utf-8")
        with zstd.open(lowercase__, "wb") as f:
            f.write(lowercase__)
        return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # Small TMX (translation-memory) XML document.
    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """file.xml"""
    __lowerCAmelCase = textwrap.dedent(
        "\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>"
    )
    with open(lowercase__, "w") as f:
        f.write(lowercase__)
    return filename


# NOTE(review): the five constants below were presumably DATA, DATA2,
# DATA_DICT_OF_LISTS, DATA_312 and DATA_STR before mangling — all became `A`,
# so only the last assignment survives.
A = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
A = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
A = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
A = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
A = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]


@pytest.fixture(scope="session")
def _lowerCamelCase():
    # Expose the dict-of-lists payload directly.
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # Arrow-cache the dict-of-lists dataset and return the cache file path.
    __lowerCAmelCase = datasets.Dataset.from_dict(lowercase__)
    __lowerCAmelCase = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=lowercase__)
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # SQLite database with a single `dataset` table filled from DATA.
    __lowerCAmelCase = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlitea.connect(lowercase__)) as con:
        __lowerCAmelCase = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # CSV file with the DATA rows.
    __lowerCAmelCase = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(lowercase__, "w", newline="") as f:
        __lowerCAmelCase = csv.DictWriter(lowercase__, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(lowercase__)
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # Second CSV file with the same DATA rows (dataset2.csv).
    __lowerCAmelCase = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(lowercase__, "w", newline="") as f:
        __lowerCAmelCase = csv.DictWriter(lowercase__, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(lowercase__)
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase):
    # bz2-compressed copy of the CSV file.
    import bza

    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """dataset.csv.bz2"""
    with open(lowercase__, "rb") as f:
        __lowerCAmelCase = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bza.open(lowercase__, "wb") as f:
        f.write(lowercase__)
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase):
    # Zip archive containing both CSV files.
    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """dataset.csv.zip"""
    with zipfile.ZipFile(lowercase__, "w") as f:
        f.write(lowercase__, arcname=os.path.basename(lowercase__))
        f.write(lowercase__, arcname=os.path.basename(lowercase__))
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase):
    # Zip archive with the CSVs renamed to upper-case .CSV extensions.
    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """dataset.csv.zip"""
    with zipfile.ZipFile(lowercase__, "w") as f:
        f.write(lowercase__, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(lowercase__, arcname=os.path.basename(csva_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase):
    # Zip archive placing both CSVs under a `main_dir/` prefix.
    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """dataset_with_dir.csv.zip"""
    with zipfile.ZipFile(lowercase__, "w") as f:
        f.write(lowercase__, arcname=os.path.join("main_dir", os.path.basename(lowercase__)))
        f.write(lowercase__, arcname=os.path.join("main_dir", os.path.basename(lowercase__)))
    return path
@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # Parquet file written from DATA.  `pa.intaa` / `pa.floataa` are presumably
    # mangled `pa.int64` / `pa.float64` — confirm against upstream.
    __lowerCAmelCase = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    __lowerCAmelCase = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.intaa(),
            "col_3": pa.floataa(),
        }
    )
    with open(lowercase__, "wb") as f:
        __lowerCAmelCase = pq.ParquetWriter(lowercase__, schema=lowercase__)
        __lowerCAmelCase = pa.Table.from_pydict(
            {k: [DATA[i][k] for i in range(len(lowercase__))] for k in DATA[0]}, schema=lowercase__
        )
        writer.write_table(lowercase__)
        writer.close()
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # JSON file: {"data": DATA}.
    __lowerCAmelCase = str(tmp_path_factory.mktemp("data") / "dataset.json")
    __lowerCAmelCase = {"""data""": DATA}
    with open(lowercase__, "w") as f:
        json.dump(lowercase__, lowercase__)
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # JSON file: {"data": DATA_DICT_OF_LISTS}.
    __lowerCAmelCase = str(tmp_path_factory.mktemp("data") / "dataset.json")
    __lowerCAmelCase = {"""data""": DATA_DICT_OF_LISTS}
    with open(lowercase__, "w") as f:
        json.dump(lowercase__, lowercase__)
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # JSON-lines file with one DATA row per line.
    __lowerCAmelCase = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(lowercase__, "w") as f:
        for item in DATA:
            f.write(json.dumps(lowercase__) + "\n")
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # Second JSON-lines file with the same rows.
    __lowerCAmelCase = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(lowercase__, "w") as f:
        for item in DATA:
            f.write(json.dumps(lowercase__) + "\n")
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # JSON-lines file built from DATA_312.
    __lowerCAmelCase = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(lowercase__, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(lowercase__) + "\n")
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # JSON-lines file built from DATA_STR.
    __lowerCAmelCase = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(lowercase__, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(lowercase__) + "\n")
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase):
    # gzip-compressed copy of the text file.
    import gzip

    __lowerCAmelCase = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(lowercase__, "rb") as orig_file:
        with gzip.open(lowercase__, "wb") as zipped_file:
            zipped_file.writelines(lowercase__)
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase):
    # gzip-compressed copy of the JSON-lines file.
    import gzip

    __lowerCAmelCase = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(lowercase__, "rb") as orig_file:
        with gzip.open(lowercase__, "wb") as zipped_file:
            zipped_file.writelines(lowercase__)
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase):
    # Zip archive containing both JSON-lines files.
    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """dataset.jsonl.zip"""
    with zipfile.ZipFile(lowercase__, "w") as f:
        f.write(lowercase__, arcname=os.path.basename(lowercase__))
        f.write(lowercase__, arcname=os.path.basename(lowercase__))
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase):
    # Zip archive nesting a JSON-lines file under `nested/`.
    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """dataset_nested.jsonl.zip"""
    with zipfile.ZipFile(lowercase__, "w") as f:
        f.write(lowercase__, arcname=os.path.join("nested", os.path.basename(lowercase__)))
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase):
    # Zip archive placing both JSON-lines files under `main_dir/`.
    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """dataset_with_dir.jsonl.zip"""
    with zipfile.ZipFile(lowercase__, "w") as f:
        f.write(lowercase__, arcname=os.path.join("main_dir", os.path.basename(lowercase__)))
        f.write(lowercase__, arcname=os.path.join("main_dir", os.path.basename(lowercase__)))
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase):
    # Tar archive containing both JSON-lines files.
    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """dataset.jsonl.tar"""
    with tarfile.TarFile(lowercase__, "w") as f:
        f.add(lowercase__, arcname=os.path.basename(lowercase__))
        f.add(lowercase__, arcname=os.path.basename(lowercase__))
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase):
    # Tar archive nesting a JSON-lines file under `nested/`.
    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """dataset_nested.jsonl.tar"""
    with tarfile.TarFile(lowercase__, "w") as f:
        f.add(lowercase__, arcname=os.path.join("nested", os.path.basename(lowercase__)))
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # Plain text file with one digit per line.
    __lowerCAmelCase = ["""0""", """1""", """2""", """3"""]
    __lowerCAmelCase = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(lowercase__, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # Second plain text file with identical content.
    __lowerCAmelCase = ["""0""", """1""", """2""", """3"""]
    __lowerCAmelCase = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(lowercase__, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # Same content under an unsupported `.abc` extension.
    __lowerCAmelCase = ["""0""", """1""", """2""", """3"""]
    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """dataset.abc"""
    with open(lowercase__, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase):
    # Zip archive containing both text files.
    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """dataset.text.zip"""
    with zipfile.ZipFile(lowercase__, "w") as f:
        f.write(lowercase__, arcname=os.path.basename(lowercase__))
        f.write(lowercase__, arcname=os.path.basename(lowercase__))
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase):
    # Zip archive placing both text files under `main_dir/`.
    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """dataset_with_dir.text.zip"""
    with zipfile.ZipFile(lowercase__, "w") as f:
        f.write(lowercase__, arcname=os.path.join("main_dir", os.path.basename(lowercase__)))
        f.write(lowercase__, arcname=os.path.join("main_dir", os.path.basename(lowercase__)))
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase):
    # Zip archive whose members carry unsupported `.ext` extensions.
    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """dataset.ext.zip"""
    with zipfile.ZipFile(lowercase__, "w") as f:
        f.write(lowercase__, arcname=os.path.basename("unsupported.ext"))
        f.write(lowercase__, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # Text file containing a U+2029 paragraph-separator "newline".
    __lowerCAmelCase = """\n""".join(["First", "Second\u2029with Unicode new line", "Third"])
    __lowerCAmelCase = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(lowercase__, "w", encoding="utf-8") as f:
        f.write(lowercase__)
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase():
    # Path to a checked-in RGB test image.
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def _lowerCamelCase():
    # Path to a checked-in 44.1 kHz WAV test file.
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")
@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase, _UpperCamelCase):
    # Zip archive containing the test image twice (second copy renamed *2.jpg).
    __lowerCAmelCase = tmp_path_factory.mktemp("data") / """dataset.img.zip"""
    with zipfile.ZipFile(lowercase__, "w") as f:
        f.write(lowercase__, arcname=os.path.basename(lowercase__))
        f.write(lowercase__, arcname=os.path.basename(lowercase__).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def _lowerCamelCase(_UpperCamelCase):
    # Directory tree with visible and hidden train/test splits — evidently used
    # to check that hidden files/directories are skipped by data discovery.
    __lowerCAmelCase = tmp_path_factory.mktemp("data_dir")
    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    return data_dir
57
"""simple docstring""" import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = FlaxStableDiffusionPipeline.from_pretrained( """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , ) lowerCAmelCase_ :int = """A painting of a squirrel eating a burger""" lowerCAmelCase_ :List[Any] = jax.device_count() lowerCAmelCase_ :Optional[Any] = num_samples * [prompt] lowerCAmelCase_ :int = sd_pipe.prepare_inputs(__A ) lowerCAmelCase_ :Optional[Any] = replicate(__A ) lowerCAmelCase_ :Union[str, Any] = shard(__A ) lowerCAmelCase_ :Optional[Any] = jax.random.PRNGKey(0 ) lowerCAmelCase_ :Tuple = jax.random.split(__A , jax.device_count() ) lowerCAmelCase_ :Union[str, Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowerCAmelCase_ :Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1] lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCAmelCase_ :Optional[int] = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Union[str, Any] = """stabilityai/stable-diffusion-2""" 
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(__A , subfolder="""scheduler""" ) lowerCAmelCase_ , lowerCAmelCase_ :List[str] = FlaxStableDiffusionPipeline.from_pretrained( __A , scheduler=__A , revision="""bf16""" , dtype=jnp.bfloataa , ) lowerCAmelCase_ :Optional[int] = scheduler_params lowerCAmelCase_ :List[Any] = """A painting of a squirrel eating a burger""" lowerCAmelCase_ :Tuple = jax.device_count() lowerCAmelCase_ :str = num_samples * [prompt] lowerCAmelCase_ :Union[str, Any] = sd_pipe.prepare_inputs(__A ) lowerCAmelCase_ :Tuple = replicate(__A ) lowerCAmelCase_ :Optional[int] = shard(__A ) lowerCAmelCase_ :List[str] = jax.random.PRNGKey(0 ) lowerCAmelCase_ :List[Any] = jax.random.split(__A , jax.device_count() ) lowerCAmelCase_ :Optional[Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowerCAmelCase_ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1] lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCAmelCase_ :Dict = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
84
0
import argparse
import glob
import logging
import os
import time
from argparse import Namespace

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset

from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors


# NOTE(review): name-mangled copy of the transformers PyTorch-Lightning GLUE
# example.  Locals were collapsed to `_lowerCamelCase`, call arguments to `__A`,
# and the base class to `A__`, so many names below (`hparams`, `inputs`, `loss`,
# `features`, `parser`, `args`, `GLUETransformer`, `main`, ...) are unbound as
# written — restore from upstream before running.
lowerCamelCase = logging.getLogger(__name__)


class A(A__):
    # Task-mode string consumed by the (mangled) BaseTransformer constructor.
    UpperCamelCase__ = "sequence-classification"

    def __init__(self, lowercase_):
        """Resolve the GLUE task's output mode and label count, then init the base model."""
        if type(__A) == dict:
            _lowerCamelCase = Namespace(**__A)
        _lowerCamelCase = glue_output_modes[hparams.task]
        _lowerCamelCase = glue_tasks_num_labels[hparams.task]
        super().__init__(__A, __A, self.mode)

    def lowerCamelCase(self, **lowercase_):
        """Forward pass: delegate directly to the wrapped transformer model."""
        return self.model(**__A)

    def lowerCamelCase(self, lowercase_, lowercase_):
        """Training step: build model inputs from the batch and log loss + learning rate."""
        _lowerCamelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            # Only some architectures consume token_type_ids.
            _lowerCamelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
        _lowerCamelCase = self(**__A)
        _lowerCamelCase = outputs[0]
        _lowerCamelCase = self.trainer.lr_schedulers[0]["""scheduler"""]
        _lowerCamelCase = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def lowerCamelCase(self):
        """Convert raw GLUE examples to features and cache them to disk per split."""
        _lowerCamelCase = self.hparams
        _lowerCamelCase = processors[args.task]()
        _lowerCamelCase = processor.get_labels()
        for mode in ["train", "dev"]:
            _lowerCamelCase = self._feature_file(__A)
            if os.path.exists(__A) and not args.overwrite_cache:
                logger.info('Loading features from cached file %s', __A)
            else:
                logger.info('Creating features from dataset file at %s', args.data_dir)
                _lowerCamelCase = (
                    processor.get_dev_examples(args.data_dir) if mode == """dev""" else processor.get_train_examples(args.data_dir)
                )
                _lowerCamelCase = convert_examples_to_features(
                    __A, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode,
                )
                logger.info('Saving features into cached file %s', __A)
                torch.save(__A, __A)

    def lowerCamelCase(self, lowercase_, lowercase_, lowercase_=False) -> DataLoader:
        """Load cached features for a split and wrap them in a DataLoader."""
        _lowerCamelCase = """dev""" if mode == """test""" else mode
        _lowerCamelCase = self._feature_file(__A)
        logger.info('Loading features from cached file %s', __A)
        _lowerCamelCase = torch.load(__A)
        _lowerCamelCase = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        _lowerCamelCase = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        _lowerCamelCase = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            _lowerCamelCase = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            _lowerCamelCase = torch.tensor([f.label for f in features], dtype=torch.float)
        return DataLoader(
            TensorDataset(__A, __A, __A, __A), batch_size=__A, shuffle=__A,
        )

    def lowerCamelCase(self, lowercase_, lowercase_):
        """Validation step: return loss plus detached logits and gold labels."""
        _lowerCamelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            _lowerCamelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
        _lowerCamelCase = self(**__A)
        _lowerCamelCase = outputs[:2]
        _lowerCamelCase = logits.detach().cpu().numpy()
        _lowerCamelCase = inputs["""labels"""].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def lowerCamelCase(self, lowercase_) -> tuple:
        """Aggregate step outputs into mean loss plus GLUE metrics for the task."""
        _lowerCamelCase = torch.stack([x['val_loss'] for x in outputs]).mean().detach().cpu().item()
        _lowerCamelCase = np.concatenate([x['pred'] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == "classification":
            _lowerCamelCase = np.argmax(__A, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            _lowerCamelCase = np.squeeze(__A)
        _lowerCamelCase = np.concatenate([x['target'] for x in outputs], axis=0)
        _lowerCamelCase = [[] for _ in range(out_label_ids.shape[0])]
        _lowerCamelCase = [[] for _ in range(out_label_ids.shape[0])]
        _lowerCamelCase = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task, __A, __A)}
        _lowerCamelCase = dict(results.items())
        _lowerCamelCase = results
        return ret, preds_list, out_label_list

    def lowerCamelCase(self, lowercase_) -> dict:
        """Validation-epoch hook: expose val_loss and metric logs."""
        _lowerCamelCase = self._eval_end(__A)
        _lowerCamelCase = ret["""log"""]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def lowerCamelCase(self, lowercase_) -> dict:
        """Test-epoch hook: same aggregation, reported as avg_test_loss."""
        _lowerCamelCase = self._eval_end(__A)
        _lowerCamelCase = ret["""log"""]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def lowerCamelCase(lowercase_, lowercase_):
        """Add GLUE-specific CLI arguments on top of the generic model args."""
        BaseTransformer.add_model_specific_args(__A, __A)
        parser.add_argument(
            '--max_seq_length', default=128, type=__A,
            help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ),
        )
        parser.add_argument(
            '--task', default='', type=__A, required=__A, help='The GLUE task to run',
        )
        parser.add_argument(
            '--gpus', default=0, type=__A,
            help='The number of GPUs allocated for this, it is by default 0 meaning none',
        )
        parser.add_argument(
            '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets'
        )
        return parser


def a_():
    """CLI entry point: parse args, train, and optionally predict on the dev set."""
    _lowerCamelCase = argparse.ArgumentParser()
    add_generic_args(lowercase__, os.getcwd())
    _lowerCamelCase = GLUETransformer.add_model_specific_args(lowercase__, os.getcwd())
    _lowerCamelCase = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        _lowerCamelCase = os.path.join(
            './results', F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S')}''',
        )
        os.makedirs(args.output_dir)
    _lowerCamelCase = GLUETransformer(lowercase__)
    _lowerCamelCase = generic_train(lowercase__, lowercase__)
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        _lowerCamelCase = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=lowercase__))
        _lowerCamelCase = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(lowercase__)


if __name__ == "__main__":
    # NOTE(review): `main` is unbound — the entry point above was presumably
    # named `main` before mangling renamed it to `a_`.
    main()
199
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def _snake_case ( ) -> Generator[int, None, None]: '''simple docstring''' lowerCAmelCase_ :dict[int, int] = {} lowerCAmelCase_ :int = 2 while True: lowerCAmelCase_ :List[Any] = factor_map.pop(lowercase__ , lowercase__ ) if factor: lowerCAmelCase_ :Optional[int] = factor + prime while x in factor_map: x += factor lowerCAmelCase_ :List[str] = factor else: lowerCAmelCase_ :Optional[int] = prime yield prime prime += 1 def _snake_case ( lowercase__ : float = 1E10 ) -> int: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = sieve() lowerCAmelCase_ :str = 1 while True: lowerCAmelCase_ :int = next(lowercase__ ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(lowercase__ ) n += 2 if __name__ == "__main__": print(solution())
84
0
import argparse
from collections import defaultdict

import yaml

PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Clean one section of the table of content of the documentation.

    - deduplicates entries sharing the same ``local`` key (requires their
      titles to agree),
    - sorts entries alphabetically by title,
    - keeps the "Overview" page first.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        # NOTE: f-string prefix was missing here, so the message printed the
        # literal text "{doc_list}" instead of the offending section.
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    """Check (and optionally fix) the "Schedulers" section of the API toc."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]["sections"]

    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    """Check (and optionally fix) the "Pipelines" section of the API toc."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
68
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): # TODO: is there an appropriate internal test set? UpperCAmelCase_ :List[Any] = "ssube/stable-diffusion-x4-upscaler-onnx" def __lowerCAmelCase ( self , __A=0 ) -> Optional[int]: lowerCAmelCase_ :Optional[Any] = floats_tensor((1, 3, 128, 128) , rng=random.Random(__A ) ) lowerCAmelCase_ :List[Any] = torch.manual_seed(__A ) lowerCAmelCase_ :Tuple = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Dict = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :int = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Tuple = 
OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :int = self.get_dummy_inputs() lowerCAmelCase_ :List[str] = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :str = np.array( [0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Union[str, Any] = pipe(**__A ).images lowerCAmelCase_ :Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Tuple = np.array( [0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Union[str, Any] = self.get_dummy_inputs() lowerCAmelCase_ :Optional[Any] = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1] assert 
image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Tuple = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Dict = pipe(**__A ).images lowerCAmelCase_ :Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Dict = np.array( [0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @property def __lowerCAmelCase ( self ) -> List[Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Optional[int] = ort.SessionOptions() lowerCAmelCase_ :Dict = False return options def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) lowerCAmelCase_ :Optional[Any] = init_image.resize((128, 128) ) # using the PNDM scheduler by default lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , 
sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Union[str, Any] = """A fantasy landscape, trending on artstation""" lowerCAmelCase_ :List[Any] = torch.manual_seed(0 ) lowerCAmelCase_ :str = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=10 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :Dict = output.images lowerCAmelCase_ :List[str] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase_ :Optional[Any] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) lowerCAmelCase_ :List[str] = init_image.resize((128, 128) ) lowerCAmelCase_ :Any = LMSDiscreteScheduler.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" ) lowerCAmelCase_ :Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = """A fantasy landscape, trending on artstation""" lowerCAmelCase_ :Optional[Any] = torch.manual_seed(0 ) lowerCAmelCase_ :List[str] = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=20 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :int = output.images lowerCAmelCase_ :List[Any] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase_ :Union[str, Any] = np.array( [0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 
0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
84
0
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    """A node of the segment tree, covering the inclusive interval [start, end]."""

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val  # aggregate of the covered interval under the tree's fn
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """Segment tree over ``collection`` combining values with binary ``function``.

    Supports O(log n) point updates and range queries for any associative
    function (e.g. ``operator.add``, ``max``, ``min``).
    """

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        # Initialize root to None so traverse() is safe on an empty collection.
        self.root = None
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set ``collection[i] = val`` and refresh all affected aggregates."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return the fn-aggregate over ``collection[i..j]`` (both inclusive)."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        # Leaf: a single element of the collection.
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        # Recompute this node's aggregate from its (now updated) children.
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range entirely in the left child tree
                return self._query_range(node.left, i, j)
            # range spans both children: split at mid and combine
            return self.fn(
                self._query_range(node.left, i, node.mid),
                self._query_range(node.right, node.mid + 1, j),
            )
        # range entirely in the right child tree
        return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield all nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
345
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=__A , ) assert hasattr(self , """env""" ) def __lowerCAmelCase ( self , __A ) -> Any: # configuration for running training on smdistributed Model Parallel lowerCAmelCase_ :Union[str, Any] = { """enabled""": True, """processes_per_host""": 8, } lowerCAmelCase_ :Tuple = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCAmelCase_ :Any = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCAmelCase_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator 
return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=__A , instance_type=self.instance_type , debugger_hook_config=__A , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=__A , py_version="""py36""" , ) def __lowerCAmelCase ( self , __A ) -> List[Any]: TrainingJobAnalytics(__A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def __lowerCAmelCase ( self , __A ) -> List[str]: # create estimator lowerCAmelCase_ :Any = self.create_estimator(__A ) # run training estimator.fit() # result dataframe lowerCAmelCase_ :Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCAmelCase_ :List[str] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCAmelCase_ :Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCAmelCase_ :Optional[int] = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __A )
84
0
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class a_ (A__ , unittest.TestCase ): __lowerCAmelCase : int = XLNetTokenizer __lowerCAmelCase : Optional[int] = XLNetTokenizerFast __lowerCAmelCase : Optional[int] = True __lowerCAmelCase : int = True def __UpperCamelCase ( self ): super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase : Optional[int] = XLNetTokenizer(__A , keep_accents=__A ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def __UpperCamelCase ( self ): _lowerCAmelCase : Optional[int] = """<s>""" _lowerCAmelCase : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A ) def __UpperCamelCase ( self ): _lowerCAmelCase : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<eod>""" ) self.assertEqual(len(__A ) , 1_0_0_6 ) def __UpperCamelCase ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def __UpperCamelCase ( self ): _lowerCAmelCase : Any = XLNetTokenizer(__A , keep_accents=__A ) _lowerCAmelCase : Dict = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] ) _lowerCAmelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", 
SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) _lowerCAmelCase : int = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual(__A , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] ) _lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(__A ) self.assertListEqual( __A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def __UpperCamelCase ( self ): _lowerCAmelCase : Tuple = XLNetTokenizer(__A , do_lower_case=__A ) _lowerCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __A , [ SPIECE_UNDERLINE + """""", """i""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] ) def __UpperCamelCase ( self ): _lowerCAmelCase : Optional[Any] = XLNetTokenizer(__A , do_lower_case=__A ) _lowerCAmelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + 
"""b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) @slow def __UpperCamelCase ( self ): _lowerCAmelCase : Optional[Any] = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" ) _lowerCAmelCase : Dict = tokenizer.encode("""sequence builders""" , add_special_tokens=__A ) _lowerCAmelCase : int = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A ) _lowerCAmelCase : int = tokenizer.build_inputs_with_special_tokens(__A ) _lowerCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__A , __A ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def __UpperCamelCase ( self ): # fmt: off _lowerCAmelCase : Union[str, Any] = {"""input_ids""": [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__A , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
309
"""Project Euler 97: find the last digits of the non-Mersenne prime 28433 * 2**7830457 + 1."""


def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of ``28433 * 2**7830457 + 1``.

    Uses three-argument :func:`pow` so only the last ``n`` digits are ever
    computed (modular exponentiation), instead of the full ~2.4M-digit number.

    Args:
        n: how many trailing digits to return (non-negative integer).

    Raises:
        ValueError: if ``n`` is not a non-negative integer.
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
84
0
"""Unit tests for the text-generation stopping criteria (max length / new tokens / time)."""
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor

if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    """Checks that each stopping criterion fires exactly at its boundary.

    NOTE: in the previous version all five test methods shared one name, so
    each definition shadowed the last and only a single test actually ran;
    the method bodies also referenced undefined mangled names.
    """

    def _get_tensors(self, length):
        # Helper: fake (batch=3, length) input_ids plus uniform dummy scores.
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        # Back-dating the initial timestamp makes the criterion fire immediately.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        # Mismatched max_length should warn, not raise.
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
151
"""Check that the auto-generated model lists in the task guides are up to date.

All paths are set with the intent you should run this script from the root of
the repo with the command: python utils/check_task_guides.py [--fix_and_overwrite]
"""
import argparse
import os

from transformers.utils import direct_transformers_import


TRANSFORMERS_PATH = "src/transformers"
TASK_GUIDES_SOURCE = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text between *start_prompt* and *end_prompt* in *filename*.

    Returns (text, start_index, end_index, lines): the extracted span with
    surrounding blank lines trimmed, its line indices, and the full file lines.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim blank lines on both edges of the span.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Map each task-guide file to the auto mapping listing models supporting that task.
TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES`
# (therefore not in any `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    """Return the markdown list of model links to embed in *task_guide*."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Check the model list in *task_guide* is current; rewrite it if *overwrite* is True."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(TASK_GUIDES_SOURCE, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(TASK_GUIDES_SOURCE, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
84
0
"""Deprecated feature-extractor alias for VideoMAE."""
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    """Deprecated alias of `VideoMAEImageProcessor`.

    Behaves identically to its base class but emits a `FutureWarning` on
    construction so callers migrate before the class is removed in v5.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
137
"""Generate all permutations of a list of integers, two ways."""


def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of *nums* recursively.

    Rotates each element to the back, permutes the remainder, then appends the
    rotated element to every sub-permutation. *nums* is restored on return.
    """
    result: list[list[int]] = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        head = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(head)
        result.extend(permutations)
        nums.append(head)  # restore the list for the next rotation
    return result


def permute_backtrack(nums: list[int]) -> list[list[int]]:
    """Return all permutations of *nums* using in-place swap backtracking."""

    def backtrack(start: int) -> None:
        # Once the cursor reaches the last slot, the prefix is a full permutation.
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    res = permute_backtrack([1, 2, 3])
    print(res)
    doctest.testmod()
84
0
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class lowercase_ ( A__ ): """simple docstring""" UpperCAmelCase_ : jnp.ndarray UpperCAmelCase_ : jnp.ndarray class lowercase_ ( nn.Module ): """simple docstring""" UpperCAmelCase_ : int UpperCAmelCase_ : Tuple[int] = (16, 32, 96, 256) UpperCAmelCase_ : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = nn.Conv( self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) lowerCAmelCase = [] for i in range(len(self.block_out_channels ) - 1 ): lowerCAmelCase = self.block_out_channels[i] lowerCAmelCase = self.block_out_channels[i + 1] lowerCAmelCase = nn.Conv( __A , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(__A ) lowerCAmelCase = nn.Conv( __A , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(__A ) lowerCAmelCase = blocks lowerCAmelCase = nn.Conv( self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self , __SCREAMING_SNAKE_CASE ) ->Tuple: lowerCAmelCase = self.conv_in(__A ) lowerCAmelCase = nn.silu(__A ) for block in self.blocks: lowerCAmelCase = block(__A ) lowerCAmelCase = nn.silu(__A ) lowerCAmelCase = self.conv_out(__A ) return embedding @flax_register_to_config class lowercase_ ( nn.Module , A__ , A__ ): """simple docstring""" UpperCAmelCase_ : int = 32 
UpperCAmelCase_ : int = 4 UpperCAmelCase_ : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) UpperCAmelCase_ : Union[bool, Tuple[bool]] = False UpperCAmelCase_ : Tuple[int] = (320, 640, 1280, 1280) UpperCAmelCase_ : int = 2 UpperCAmelCase_ : Union[int, Tuple[int]] = 8 UpperCAmelCase_ : Optional[Union[int, Tuple[int]]] = None UpperCAmelCase_ : int = 1280 UpperCAmelCase_ : float = 0.0 UpperCAmelCase_ : bool = False UpperCAmelCase_ : jnp.dtype = jnp.floataa UpperCAmelCase_ : bool = True UpperCAmelCase_ : int = 0 UpperCAmelCase_ : str = "rgb" UpperCAmelCase_ : Tuple[int] = (16, 32, 96, 256) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->FrozenDict: # init input tensors lowerCAmelCase = (1, self.in_channels, self.sample_size, self.sample_size) lowerCAmelCase = jnp.zeros(__A , dtype=jnp.floataa ) lowerCAmelCase = jnp.ones((1,) , dtype=jnp.intaa ) lowerCAmelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) lowerCAmelCase = (1, 3, self.sample_size * 8, self.sample_size * 8) lowerCAmelCase = jnp.zeros(__A , dtype=jnp.floataa ) lowerCAmelCase = jax.random.split(__A ) lowerCAmelCase = {"""params""": params_rng, """dropout""": dropout_rng} return self.init(__A , __A , __A , __A , __A )["params"] def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: lowerCAmelCase = self.block_out_channels lowerCAmelCase = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. 
The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. lowerCAmelCase = self.num_attention_heads or self.attention_head_dim # input lowerCAmelCase = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time lowerCAmelCase = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) lowerCAmelCase = FlaxTimestepEmbedding(__A , dtype=self.dtype ) lowerCAmelCase = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , ) lowerCAmelCase = self.only_cross_attention if isinstance(__A , __A ): lowerCAmelCase = (only_cross_attention,) * len(self.down_block_types ) if isinstance(__A , __A ): lowerCAmelCase = (num_attention_heads,) * len(self.down_block_types ) # down lowerCAmelCase = [] lowerCAmelCase = [] lowerCAmelCase = block_out_channels[0] lowerCAmelCase = nn.Conv( __A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(__A ) for i, down_block_type in enumerate(self.down_block_types ): lowerCAmelCase = output_channel lowerCAmelCase = block_out_channels[i] lowerCAmelCase = i == len(__A ) - 1 if down_block_type == "CrossAttnDownBlock2D": lowerCAmelCase = FlaxCrossAttnDownBlockaD( in_channels=__A , out_channels=__A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , ) else: lowerCAmelCase = 
FlaxDownBlockaD( in_channels=__A , out_channels=__A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(__A ) for _ in range(self.layers_per_block ): lowerCAmelCase = nn.Conv( __A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(__A ) if not is_final_block: lowerCAmelCase = nn.Conv( __A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(__A ) lowerCAmelCase = down_blocks lowerCAmelCase = controlnet_down_blocks # mid lowerCAmelCase = block_out_channels[-1] lowerCAmelCase = FlaxUNetMidBlockaDCrossAttn( in_channels=__A , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , ) lowerCAmelCase = nn.Conv( __A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1.0 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = False , ) ->Union[FlaxControlNetOutput, Tuple]: lowerCAmelCase = self.controlnet_conditioning_channel_order if channel_order == "bgr": lowerCAmelCase = jnp.flip(__A , axis=1 ) # 1. time if not isinstance(__A , jnp.ndarray ): lowerCAmelCase = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(__A , jnp.ndarray ) and len(timesteps.shape ) == 0: lowerCAmelCase = timesteps.astype(dtype=jnp.floataa ) lowerCAmelCase = jnp.expand_dims(__A , 0 ) lowerCAmelCase = self.time_proj(__A ) lowerCAmelCase = self.time_embedding(__A ) # 2. 
pre-process lowerCAmelCase = jnp.transpose(__A , (0, 2, 3, 1) ) lowerCAmelCase = self.conv_in(__A ) lowerCAmelCase = jnp.transpose(__A , (0, 2, 3, 1) ) lowerCAmelCase = self.controlnet_cond_embedding(__A ) sample += controlnet_cond # 3. down lowerCAmelCase = (sample,) for down_block in self.down_blocks: if isinstance(__A , __A ): lowerCAmelCase = down_block(__A , __A , __A , deterministic=not train ) else: lowerCAmelCase = down_block(__A , __A , deterministic=not train ) down_block_res_samples += res_samples # 4. mid lowerCAmelCase = self.mid_block(__A , __A , __A , deterministic=not train ) # 5. contronet blocks lowerCAmelCase = () for down_block_res_sample, controlnet_block in zip(__A , self.controlnet_down_blocks ): lowerCAmelCase = controlnet_block(__A ) controlnet_down_block_res_samples += (down_block_res_sample,) lowerCAmelCase = controlnet_down_block_res_samples lowerCAmelCase = self.controlnet_mid_block(__A ) # 6. scaling lowerCAmelCase = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=__A , mid_block_res_sample=__A )
338
"""Tests for the BioGPT BPE tokenizer."""
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for BioGptTokenizer on a tiny synthetic BPE vocab."""

    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        # BioGPT prepends a single BOS (id 2) to each sequence.
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
84
0
"""Lazy import structure for the DeiT model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# Base import structure; optional backends extend it below.
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
244
"""BertGeneration model configuration."""
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    """Configuration for BertGeneration encoder/decoder models.

    Defaults describe a large (24-layer, 1024-hidden) configuration. All
    arguments are stored as same-named attributes; pad/bos/eos token ids are
    forwarded to :class:`PretrainedConfig`.
    """

    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        # Special-token ids are consumed by the base class, not stored here.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
84
0
'''simple docstring''' import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger __lowercase = get_logger(__name__) __lowercase = R'''\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n''' class a__: '''simple docstring''' @add_start_docstrings(__A) def __call__( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.") class a__: '''simple docstring''' @add_start_docstrings(__A) def __call__( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" raise NotImplementedError( f"{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.") class a__( A__ ): '''simple docstring''' @add_start_docstrings(__A) def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" for processor in self: lowerCAmelCase = inspect.signature(processor.__call__).parameters if len(__A) > 3: if not all(arg in kwargs for arg in list(function_args.keys())[2:]): raise ValueError( f"Make sure that all the required parameters: {list(function_args.keys())} for " f"{processor.__class__} are passed to the logits processor.") lowerCAmelCase = processor(__A , __A , __A , **__A) else: lowerCAmelCase = processor(__A , __A , __A) return scores class a__( A__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase): """simple docstring""" if not isinstance(__A , __A) or not (temperature > 0): raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}") lowerCAmelCase = temperature def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = scores / self.temperature return scores class a__( A__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase = -float("""Inf""") , __lowerCAmelCase = 1): """simple docstring""" if not isinstance(__A , __A) or (top_p < 0 or top_p > 1.0): raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}") if not isinstance(__A , __A) or (min_tokens_to_keep < 1): raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}") lowerCAmelCase = top_p lowerCAmelCase = filter_value lowerCAmelCase = min_tokens_to_keep def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = lax.top_k(__A , scores.shape[-1]) lowerCAmelCase = jnp.full_like(__A , self.filter_value) lowerCAmelCase = jax.nn.softmax(__A , axis=-1).cumsum(axis=-1) lowerCAmelCase = cumulative_probs < self.top_p 
# include the token that is higher than top_p as well lowerCAmelCase = jnp.roll(__A , 1) score_mask |= score_mask.at[:, 0].set(__A) # min tokens to keep lowerCAmelCase = score_mask.at[:, : self.min_tokens_to_keep].set(__A) lowerCAmelCase = jnp.where(__A , __A , __A) lowerCAmelCase = jax.lax.sort_key_val(__A , __A)[-1] return next_scores class a__( A__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase = -float("""Inf""") , __lowerCAmelCase = 1): """simple docstring""" if not isinstance(__A , __A) or top_k <= 0: raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}") lowerCAmelCase = max(__A , __A) lowerCAmelCase = filter_value def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = scores.shape lowerCAmelCase = jnp.full(batch_size * vocab_size , self.filter_value) lowerCAmelCase = min(self.top_k , scores.shape[-1]) # Safety check lowerCAmelCase = lax.top_k(__A , __A) lowerCAmelCase = jnp.broadcast_to((jnp.arange(__A) * vocab_size)[:, None] , (batch_size, topk)).flatten() lowerCAmelCase = topk_scores.flatten() lowerCAmelCase = topk_indices.flatten() + shift lowerCAmelCase = next_scores_flat.at[topk_indices_flat].set(__A) lowerCAmelCase = next_scores_flat.reshape(__A , __A) return next_scores class a__( A__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = bos_token_id def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = jnp.full(scores.shape , -float("""inf""")) lowerCAmelCase = 1 - jnp.bool_(cur_len - 1) lowerCAmelCase = jnp.where(__A , new_scores.at[:, self.bos_token_id].set(0) , __A) return scores class a__( A__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = max_length lowerCAmelCase = eos_token_id def __call__( self , __lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = jnp.full(scores.shape , -float("""inf""")) lowerCAmelCase = 1 - jnp.bool_(cur_len - self.max_length + 1) lowerCAmelCase = jnp.where(__A , new_scores.at[:, self.eos_token_id].set(0) , __A) return scores class a__( A__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" if not isinstance(__A , __A) or min_length < 0: raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}") if not isinstance(__A , __A) or eos_token_id < 0: raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}") lowerCAmelCase = min_length lowerCAmelCase = eos_token_id def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = 1 - jnp.clip(cur_len - self.min_length , 0 , 1) lowerCAmelCase = jnp.where(__A , scores.at[:, self.eos_token_id].set(-float("""inf""")) , __A) return scores class a__( A__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = list(__A) lowerCAmelCase = begin_index def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = 1 - jnp.bool_(cur_len - self.begin_index) lowerCAmelCase = jnp.where(__A , scores.at[:, self.begin_suppress_tokens].set(-float("""inf""")) , __A) return scores class a__( A__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = list(__A) def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = scores.at[..., self.suppress_tokens].set(-float("""inf""")) return scores class a__( A__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = dict(__A) # Converts the dictionary of format {index: token} containing the tokens to be 
forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. lowerCAmelCase = jnp.ones((max(force_token_map.keys()) + 1) , dtype=jnp.intaa) * -1 for index, token in force_token_map.items(): if token is not None: lowerCAmelCase = force_token_array.at[index].set(__A) lowerCAmelCase = jnp.intaa(__A) def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" def _force_token(__lowerCAmelCase): lowerCAmelCase = scores.shape[0] lowerCAmelCase = self.force_token_array[generation_idx] lowerCAmelCase = jnp.ones_like(__A , dtype=scores.dtype) * -float("""inf""") lowerCAmelCase = jnp.zeros((batch_size, 1) , dtype=scores.dtype) lowerCAmelCase = lax.dynamic_update_slice(__A , __A , (0, current_token)) return new_scores lowerCAmelCase = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(__A) , lambda: scores , ) , ) return scores class a__( A__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = generate_config.eos_token_id lowerCAmelCase = generate_config.no_timestamps_token_id lowerCAmelCase = generate_config.no_timestamps_token_id + 1 lowerCAmelCase = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(__A , """max_initial_timestamp_index"""): lowerCAmelCase = generate_config.max_initial_timestamp_index else: lowerCAmelCase = model_config.vocab_size if self.max_initial_timestamp_index is None: lowerCAmelCase = model_config.vocab_size def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = scores.at[:, self.no_timestamps_token_id].set(-float("""inf""")) def 
handle_pairs(__lowerCAmelCase , __lowerCAmelCase): lowerCAmelCase = jnp.where((cur_len - self.begin_index) >= 1 , __A , __A) lowerCAmelCase = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __A , ) lowerCAmelCase = jnp.where((cur_len - self.begin_index) < 2 , __A , __A) lowerCAmelCase = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , __A , __A , ) return jnp.where( __A , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("""inf""")) , scores_k.at[: self.eos_token_id].set(-float("""inf""")) , ) , __A , ) lowerCAmelCase = jax.vmap(__A)(__A , __A) lowerCAmelCase = jnp.where(cur_len == self.begin_index , __A , __A) lowerCAmelCase = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __A , ) lowerCAmelCase = self.timestamp_begin + self.max_initial_timestamp_index lowerCAmelCase = jnp.where( __A , scores.at[:, last_allowed + 1 :].set(-float("""inf""")) , __A , ) # if sum of probability over timestamps is above any other token, sample timestamp lowerCAmelCase = jax.nn.log_softmax(__A , axis=-1) def handle_cumulative_probs(__lowerCAmelCase , __lowerCAmelCase): lowerCAmelCase = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1) lowerCAmelCase = jnp.max(logprobs_k[: self.timestamp_begin]) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("""inf""")) , __A , ) lowerCAmelCase = jax.vmap(__A)(__A , __A) return scores
272
"""Ford–Fulkerson maximum flow with BFS path search (Edmonds–Karp style)."""


def bfs(graph, source, sink, parent):
    """Breadth-first search over residual capacities.

    Records, in *parent*, the predecessor of each reached vertex and returns
    True when *sink* is reachable from *source* through positive-capacity edges.
    """
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Return the maximum flow from *source* to *sink*.

    *graph* is a capacity matrix and is mutated in place into its residual form.
    """
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        # Find the bottleneck capacity along the discovered augmenting path.
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow

        # Update residual capacities (forward minus flow, backward plus flow).
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


if __name__ == "__main__":
    graph = [
        [0, 16, 13, 0, 0, 0],
        [0, 0, 10, 12, 0, 0],
        [0, 4, 0, 0, 14, 0],
        [0, 0, 9, 0, 0, 20],
        [0, 0, 0, 7, 0, 4],
        [0, 0, 0, 0, 0, 0],
    ]
    source, sink = 0, 5
    print(ford_fulkerson(graph, source, sink))
84
0
import numpy # List of input, output pairs _SCREAMING_SNAKE_CASE : Union[str, Any] = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) _SCREAMING_SNAKE_CASE : List[Any] = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50)) _SCREAMING_SNAKE_CASE : Tuple = [2, 4, 1, 5] _SCREAMING_SNAKE_CASE : Optional[int] = len(train_data) _SCREAMING_SNAKE_CASE : Optional[Any] = 0.009 def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_="train" ): """simple docstring""" return calculate_hypothesis_value(lowercase__ ,lowercase__ ) - output( lowercase__ ,lowercase__ ) def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" snake_case = 0 for i in range(len(lowercase__ ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ): """simple docstring""" if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ): """simple docstring""" if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_=m ): """simple docstring""" snake_case = 0 for i in range(lowercase__ ): if index == -1: summation_value += _error(lowercase__ ) else: summation_value += _error(lowercase__ ) * train_data[i][0][index] return summation_value def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" snake_case = summation_of_cost_derivative(lowercase__ ,lowercase__ ) / m return cost_derivative_value def UpperCAmelCase__ (): """simple docstring""" global parameter_vector # Tune these values to set a tolerance value for predicted output snake_case = 0.00_0002 snake_case = 0 snake_case = 0 while True: j += 1 snake_case = [0, 0, 0, 0] for i in range(0 ,len(lowercase__ ) ): 
snake_case = get_cost_derivative(i - 1 ) snake_case = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( lowercase__ ,lowercase__ ,atol=lowercase__ ,rtol=lowercase__ ,): break snake_case = temp_parameter_vector print(('''Number of iterations:''', j) ) def UpperCAmelCase__ (): """simple docstring""" for i in range(len(lowercase__ ) ): print(('''Actual output value:''', output(lowercase__ ,'''test''' )) ) print(('''Hypothesis output:''', calculate_hypothesis_value(lowercase__ ,'''test''' )) ) if __name__ == "__main__": run_gradient_descent() print("\nTesting gradient descent for a linear hypothesis function.\n") test_gradient_descent()
127
"""Pytest fixtures producing on-disk sample datasets in many file formats.

NOTE(review): identifiers in this module are machine-mangled — every fixture
is named ``_snake_case`` (so later definitions shadow earlier ones in the
module namespace), locals are all ``lowerCAmelCase_`` and many bodies
reference undefined names (``lowercase__``, ``dataset``, ``path`` ...);
module names like ``sqlitea``/``bza``/``lza``/``pyazr`` look like mangled
``sqlite3``/``bz2``/``lz4``/``py7zr`` — TODO confirm against the upstream
file.  Code is kept token-identical; only comments were added.
"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config


# In-memory Dataset with tokens / labels / answers / id columns.
@pytest.fixture(scope="""session""")
def _snake_case() -> List[str]:
    '''simple docstring'''
    lowerCAmelCase_: Union[str, Any] = 1_0
    lowerCAmelCase_: Optional[int] = datasets.Features(
        {
            """tokens""": datasets.Sequence(datasets.Value("""string""")),
            """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""])),
            """answers""": datasets.Sequence(
                {
                    """text""": datasets.Value("""string"""),
                    """answer_start""": datasets.Value("""int32"""),
                }
            ),
            """id""": datasets.Value("""int64"""),
        }
    )
    lowerCAmelCase_: int = datasets.Dataset.from_dict(
        {
            """tokens""": [["""foo"""] * 5] * n,
            """labels""": [[1] * 5] * n,
            """answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
            """id""": list(range(lowercase__)),
        },
        features=lowercase__,
    )
    return dataset


# The dataset above cached to an .arrow file.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Tuple, lowercase__: int) -> List[str]:
    '''simple docstring'''
    lowerCAmelCase_: List[str] = str(tmp_path_factory.mktemp("""data""") / """file.arrow""")
    dataset.map(cache_file_name=lowercase__)
    return filename


# FILE_CONTENT + files
__UpperCAmelCase = '\\n Text data.\n Second line of data.'


# Plain text file containing FILE_CONTENT.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: str) -> str:
    '''simple docstring'''
    lowerCAmelCase_: Union[str, Any] = tmp_path_factory.mktemp("""data""") / """file.txt"""
    lowerCAmelCase_: List[Any] = FILE_CONTENT
    with open(lowercase__, """w""") as f:
        f.write(lowercase__)
    return filename


# bz2-compressed copy of the text file.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: List[Any]) -> Tuple:
    '''simple docstring'''
    import bza

    lowerCAmelCase_: Optional[int] = tmp_path_factory.mktemp("""data""") / """file.txt.bz2"""
    lowerCAmelCase_: Tuple = bytes(lowercase__, """utf-8""")
    with bza.open(lowercase__, """wb""") as f:
        f.write(lowercase__)
    return path


# gzip-compressed copy of the text file.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Optional[Any]) -> Dict:
    '''simple docstring'''
    import gzip

    lowerCAmelCase_: int = str(tmp_path_factory.mktemp("""data""") / """file.txt.gz""")
    lowerCAmelCase_: Tuple = bytes(lowercase__, """utf-8""")
    with gzip.open(lowercase__, """wb""") as f:
        f.write(lowercase__)
    return path


# lz4-compressed copy (only when the optional lz4 dependency is available).
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Dict) -> Optional[int]:
    '''simple docstring'''
    if datasets.config.LZ4_AVAILABLE:
        import lza.frame

        lowerCAmelCase_: List[Any] = tmp_path_factory.mktemp("""data""") / """file.txt.lz4"""
        lowerCAmelCase_: int = bytes(lowercase__, """utf-8""")
        with lza.frame.open(lowercase__, """wb""") as f:
            f.write(lowercase__)
    return path


# 7z archive of the text file (only when py7zr is available).
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Dict, lowercase__: Optional[int]) -> Any:
    '''simple docstring'''
    if datasets.config.PY7ZR_AVAILABLE:
        import pyazr

        lowerCAmelCase_: Dict = tmp_path_factory.mktemp("""data""") / """file.txt.7z"""
        with pyazr.SevenZipFile(lowercase__, """w""") as archive:
            archive.write(lowercase__, arcname=os.path.basename(lowercase__))
    return path


# tar archive of the text file.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Optional[Any], lowercase__: Union[str, Any]) -> Union[str, Any]:
    '''simple docstring'''
    import tarfile

    lowerCAmelCase_: Any = tmp_path_factory.mktemp("""data""") / """file.txt.tar"""
    with tarfile.TarFile(lowercase__, """w""") as f:
        f.add(lowercase__, arcname=os.path.basename(lowercase__))
    return path


# xz (lzma) compressed copy of the text file.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Tuple) -> str:
    '''simple docstring'''
    import lzma

    lowerCAmelCase_: Optional[Any] = tmp_path_factory.mktemp("""data""") / """file.txt.xz"""
    lowerCAmelCase_: Optional[Any] = bytes(lowercase__, """utf-8""")
    with lzma.open(lowercase__, """wb""") as f:
        f.write(lowercase__)
    return path


# zip archive of the text file.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Union[str, Any], lowercase__: List[Any]) -> Any:
    '''simple docstring'''
    import zipfile

    lowerCAmelCase_: Dict = tmp_path_factory.mktemp("""data""") / """file.txt.zip"""
    with zipfile.ZipFile(lowercase__, """w""") as f:
        f.write(lowercase__, arcname=os.path.basename(lowercase__))
    return path


# zstandard-compressed copy (only when zstandard is available).
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: int) -> Tuple:
    '''simple docstring'''
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        lowerCAmelCase_: Union[str, Any] = tmp_path_factory.mktemp("""data""") / """file.txt.zst"""
        lowerCAmelCase_: Any = bytes(lowercase__, """utf-8""")
        with zstd.open(lowercase__, """wb""") as f:
            f.write(lowercase__)
    return path


# Small TMX (translation memory) XML file.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: List[str]) -> str:
    '''simple docstring'''
    lowerCAmelCase_: str = tmp_path_factory.mktemp("""data""") / """file.xml"""
    lowerCAmelCase_: Any = textwrap.dedent(
        """\
    <?xml version=\"1.0\" encoding=\"UTF-8\" ?>
    <tmx version=\"1.4\">
      <header segtype=\"sentence\" srclang=\"ca\" />
      <body>
        <tu>
          <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(lowercase__, """w""") as f:
        f.write(lowercase__)
    return filename


# Shared sample rows / column dicts used by the format fixtures below.
__UpperCAmelCase = [
    {'col_1': '0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': '1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': '2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__UpperCAmelCase = [
    {'col_1': '4', 'col_2': 4, 'col_3': 4.0},
    {'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__UpperCAmelCase = {
    'col_1': ['0', '1', '2', '3'],
    'col_2': [0, 1, 2, 3],
    'col_3': [0.0, 1.0, 2.0, 3.0],
}
__UpperCAmelCase = [
    {'col_3': 0.0, 'col_1': '0', 'col_2': 0},
    {'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__UpperCAmelCase = [
    {'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]


# The dict-of-lists sample data as a fixture.
@pytest.fixture(scope="""session""")
def _snake_case() -> Union[str, Any]:
    '''simple docstring'''
    return DATA_DICT_OF_LISTS


# Sample data cached to an .arrow dataset file.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: int) -> Any:
    '''simple docstring'''
    lowerCAmelCase_: Tuple = datasets.Dataset.from_dict(lowercase__)
    lowerCAmelCase_: List[Any] = str(tmp_path_factory.mktemp("""data""") / """dataset.arrow""")
    dataset.map(cache_file_name=lowercase__)
    return path


# Sample data in a SQLite database.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: int) -> str:
    '''simple docstring'''
    lowerCAmelCase_: List[Any] = str(tmp_path_factory.mktemp("""data""") / """dataset.sqlite""")
    with contextlib.closing(sqlitea.connect(lowercase__)) as con:
        lowerCAmelCase_: Union[str, Any] = con.cursor()
        cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""")
        for item in DATA:
            cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""", tuple(item.values()))
        con.commit()
    return path


# Sample data as dataset.csv.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Tuple) -> int:
    '''simple docstring'''
    lowerCAmelCase_: List[str] = str(tmp_path_factory.mktemp("""data""") / """dataset.csv""")
    with open(lowercase__, """w""", newline="""""") as f:
        lowerCAmelCase_: Optional[int] = csv.DictWriter(lowercase__, fieldnames=["""col_1""", """col_2""", """col_3"""])
        writer.writeheader()
        for item in DATA:
            writer.writerow(lowercase__)
    return path


# Second CSV copy (dataset2.csv) for multi-file archive fixtures.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Dict) -> Any:
    '''simple docstring'''
    lowerCAmelCase_: str = str(tmp_path_factory.mktemp("""data""") / """dataset2.csv""")
    with open(lowercase__, """w""", newline="""""") as f:
        lowerCAmelCase_: Dict = csv.DictWriter(lowercase__, fieldnames=["""col_1""", """col_2""", """col_3"""])
        writer.writeheader()
        for item in DATA:
            writer.writerow(lowercase__)
    return path


# bz2-compressed CSV.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: str, lowercase__: Dict) -> Union[str, Any]:
    '''simple docstring'''
    import bza

    lowerCAmelCase_: int = tmp_path_factory.mktemp("""data""") / """dataset.csv.bz2"""
    with open(lowercase__, """rb""") as f:
        lowerCAmelCase_: Union[str, Any] = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bza.open(lowercase__, """wb""") as f:
        f.write(lowercase__)
    return path


# zip with both CSV files.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: str, lowercase__: Optional[Any], lowercase__: Any) -> List[str]:
    '''simple docstring'''
    lowerCAmelCase_: str = tmp_path_factory.mktemp("""data""") / """dataset.csv.zip"""
    with zipfile.ZipFile(lowercase__, """w""") as f:
        f.write(lowercase__, arcname=os.path.basename(lowercase__))
        f.write(lowercase__, arcname=os.path.basename(lowercase__))
    return path


# zip where the members use an upper-case .CSV extension.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: List[str], lowercase__: List[str], lowercase__: Union[str, Any]) -> Optional[Any]:
    '''simple docstring'''
    lowerCAmelCase_: str = tmp_path_factory.mktemp("""data""") / """dataset.csv.zip"""
    with zipfile.ZipFile(lowercase__, """w""") as f:
        f.write(lowercase__, arcname=os.path.basename(csv_path.replace(""".csv""", """.CSV""")))
        f.write(lowercase__, arcname=os.path.basename(csva_path.replace(""".csv""", """.CSV""")))
    return path


# zip with the CSV files nested under main_dir/.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Optional[int], lowercase__: Tuple, lowercase__: str) -> Any:
    '''simple docstring'''
    lowerCAmelCase_: int = tmp_path_factory.mktemp("""data""") / """dataset_with_dir.csv.zip"""
    with zipfile.ZipFile(lowercase__, """w""") as f:
        f.write(lowercase__, arcname=os.path.join("""main_dir""", os.path.basename(lowercase__)))
        f.write(lowercase__, arcname=os.path.join("""main_dir""", os.path.basename(lowercase__)))
    return path


# Sample data as a Parquet file (NOTE(review): ``pa.intaa``/``pa.floataa``
# look like mangled ``pa.int64``/``pa.float64`` — TODO confirm upstream).
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Dict) -> Optional[Any]:
    '''simple docstring'''
    lowerCAmelCase_: Optional[int] = str(tmp_path_factory.mktemp("""data""") / """dataset.parquet""")
    lowerCAmelCase_: Optional[Any] = pa.schema(
        {
            """col_1""": pa.string(),
            """col_2""": pa.intaa(),
            """col_3""": pa.floataa(),
        }
    )
    with open(lowercase__, """wb""") as f:
        lowerCAmelCase_: Optional[int] = pq.ParquetWriter(lowercase__, schema=lowercase__)
        lowerCAmelCase_: List[str] = pa.Table.from_pydict(
            {k: [DATA[i][k] for i in range(len(lowercase__))] for k in DATA[0]}, schema=lowercase__
        )
        writer.write_table(lowercase__)
        writer.close()
    return path


# Sample data as JSON ({"data": DATA}).
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Tuple) -> List[Any]:
    '''simple docstring'''
    lowerCAmelCase_: Dict = str(tmp_path_factory.mktemp("""data""") / """dataset.json""")
    lowerCAmelCase_: Union[str, Any] = {"""data""": DATA}
    with open(lowercase__, """w""") as f:
        json.dump(lowercase__, lowercase__)
    return path


# Dict-of-lists sample data as JSON.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: str) -> List[Any]:
    '''simple docstring'''
    lowerCAmelCase_: List[str] = str(tmp_path_factory.mktemp("""data""") / """dataset.json""")
    lowerCAmelCase_: Optional[Any] = {"""data""": DATA_DICT_OF_LISTS}
    with open(lowercase__, """w""") as f:
        json.dump(lowercase__, lowercase__)
    return path


# Sample data as JSON lines.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Tuple) -> List[Any]:
    '''simple docstring'''
    lowerCAmelCase_: Optional[int] = str(tmp_path_factory.mktemp("""data""") / """dataset.jsonl""")
    with open(lowercase__, """w""") as f:
        for item in DATA:
            f.write(json.dumps(lowercase__) + """\n""")
    return path


# Second JSON-lines copy.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Any) -> List[str]:
    '''simple docstring'''
    lowerCAmelCase_: List[Any] = str(tmp_path_factory.mktemp("""data""") / """dataset2.jsonl""")
    with open(lowercase__, """w""") as f:
        for item in DATA:
            f.write(json.dumps(lowercase__) + """\n""")
    return path


# JSON lines with a different column order (DATA_312).
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Optional[int]) -> List[Any]:
    '''simple docstring'''
    lowerCAmelCase_: str = str(tmp_path_factory.mktemp("""data""") / """dataset_312.jsonl""")
    with open(lowercase__, """w""") as f:
        for item in DATA_312:
            f.write(json.dumps(lowercase__) + """\n""")
    return path


# JSON lines with string-typed col_1 (DATA_STR).
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Any) -> Optional[Any]:
    '''simple docstring'''
    lowerCAmelCase_: Tuple = str(tmp_path_factory.mktemp("""data""") / """dataset-str.jsonl""")
    with open(lowercase__, """w""") as f:
        for item in DATA_STR:
            f.write(json.dumps(lowercase__) + """\n""")
    return path


# gzip-compressed copy of the text dataset.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: int, lowercase__: Dict) -> Optional[int]:
    '''simple docstring'''
    import gzip

    lowerCAmelCase_: Optional[Any] = str(tmp_path_factory.mktemp("""data""") / """dataset.txt.gz""")
    with open(lowercase__, """rb""") as orig_file:
        with gzip.open(lowercase__, """wb""") as zipped_file:
            zipped_file.writelines(lowercase__)
    return path


# gzip-compressed copy of the JSON-lines dataset.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: List[str], lowercase__: List[Any]) -> Any:
    '''simple docstring'''
    import gzip

    lowerCAmelCase_: Optional[Any] = str(tmp_path_factory.mktemp("""data""") / """dataset.jsonl.gz""")
    with open(lowercase__, """rb""") as orig_file:
        with gzip.open(lowercase__, """wb""") as zipped_file:
            zipped_file.writelines(lowercase__)
    return path


# zip with both JSON-lines files.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: List[str], lowercase__: Optional[int], lowercase__: List[Any]) -> Dict:
    '''simple docstring'''
    lowerCAmelCase_: Optional[int] = tmp_path_factory.mktemp("""data""") / """dataset.jsonl.zip"""
    with zipfile.ZipFile(lowercase__, """w""") as f:
        f.write(lowercase__, arcname=os.path.basename(lowercase__))
        f.write(lowercase__, arcname=os.path.basename(lowercase__))
    return path


# zip with a JSON-lines file nested under nested/.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Any, lowercase__: str, lowercase__: Optional[Any], lowercase__: Union[str, Any]) -> Tuple:
    '''simple docstring'''
    lowerCAmelCase_: Optional[int] = tmp_path_factory.mktemp("""data""") / """dataset_nested.jsonl.zip"""
    with zipfile.ZipFile(lowercase__, """w""") as f:
        f.write(lowercase__, arcname=os.path.join("""nested""", os.path.basename(lowercase__)))
    return path


# zip with JSON-lines files under main_dir/.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Any, lowercase__: List[Any], lowercase__: List[str]) -> int:
    '''simple docstring'''
    lowerCAmelCase_: str = tmp_path_factory.mktemp("""data""") / """dataset_with_dir.jsonl.zip"""
    with zipfile.ZipFile(lowercase__, """w""") as f:
        f.write(lowercase__, arcname=os.path.join("""main_dir""", os.path.basename(lowercase__)))
        f.write(lowercase__, arcname=os.path.join("""main_dir""", os.path.basename(lowercase__)))
    return path


# tar with both JSON-lines files.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Any, lowercase__: str, lowercase__: List[str]) -> List[Any]:
    '''simple docstring'''
    lowerCAmelCase_: Any = tmp_path_factory.mktemp("""data""") / """dataset.jsonl.tar"""
    with tarfile.TarFile(lowercase__, """w""") as f:
        f.add(lowercase__, arcname=os.path.basename(lowercase__))
        f.add(lowercase__, arcname=os.path.basename(lowercase__))
    return path


# tar with a JSON-lines file nested under nested/.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Dict, lowercase__: str, lowercase__: List[str], lowercase__: int) -> Dict:
    '''simple docstring'''
    lowerCAmelCase_: int = tmp_path_factory.mktemp("""data""") / """dataset_nested.jsonl.tar"""
    with tarfile.TarFile(lowercase__, """w""") as f:
        f.add(lowercase__, arcname=os.path.join("""nested""", os.path.basename(lowercase__)))
    return path


# Plain text dataset (one value per line).
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: List[str]) -> Tuple:
    '''simple docstring'''
    lowerCAmelCase_: str = ["""0""", """1""", """2""", """3"""]
    lowerCAmelCase_: List[Any] = str(tmp_path_factory.mktemp("""data""") / """dataset.txt""")
    with open(lowercase__, """w""") as f:
        for item in data:
            f.write(item + """\n""")
    return path


# Second plain text dataset.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: List[str]) -> Dict:
    '''simple docstring'''
    lowerCAmelCase_: int = ["""0""", """1""", """2""", """3"""]
    lowerCAmelCase_: List[str] = str(tmp_path_factory.mktemp("""data""") / """dataset2.txt""")
    with open(lowercase__, """w""") as f:
        for item in data:
            f.write(item + """\n""")
    return path


# Same content with an unsupported .abc extension.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: List[Any]) -> List[str]:
    '''simple docstring'''
    lowerCAmelCase_: Dict = ["""0""", """1""", """2""", """3"""]
    lowerCAmelCase_: Tuple = tmp_path_factory.mktemp("""data""") / """dataset.abc"""
    with open(lowercase__, """w""") as f:
        for item in data:
            f.write(item + """\n""")
    return path


# zip with both text datasets.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: List[str], lowercase__: str, lowercase__: int) -> str:
    '''simple docstring'''
    lowerCAmelCase_: Any = tmp_path_factory.mktemp("""data""") / """dataset.text.zip"""
    with zipfile.ZipFile(lowercase__, """w""") as f:
        f.write(lowercase__, arcname=os.path.basename(lowercase__))
        f.write(lowercase__, arcname=os.path.basename(lowercase__))
    return path


# zip with text datasets under main_dir/.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Tuple, lowercase__: Tuple, lowercase__: List[str]) -> List[str]:
    '''simple docstring'''
    lowerCAmelCase_: str = tmp_path_factory.mktemp("""data""") / """dataset_with_dir.text.zip"""
    with zipfile.ZipFile(lowercase__, """w""") as f:
        f.write(lowercase__, arcname=os.path.join("""main_dir""", os.path.basename(lowercase__)))
        f.write(lowercase__, arcname=os.path.join("""main_dir""", os.path.basename(lowercase__)))
    return path


# zip whose members use unsupported extensions.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Optional[int], lowercase__: Any, lowercase__: Tuple) -> List[Any]:
    '''simple docstring'''
    lowerCAmelCase_: Tuple = tmp_path_factory.mktemp("""data""") / """dataset.ext.zip"""
    with zipfile.ZipFile(lowercase__, """w""") as f:
        f.write(lowercase__, arcname=os.path.basename("""unsupported.ext"""))
        f.write(lowercase__, arcname=os.path.basename("""unsupported_2.ext"""))
    return path


# Text file containing a U+2029 (paragraph separator) "new line".
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Tuple) -> Dict:
    '''simple docstring'''
    lowerCAmelCase_: Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""])
    lowerCAmelCase_: str = str(tmp_path_factory.mktemp("""data""") / """dataset_with_unicode_new_lines.txt""")
    with open(lowercase__, """w""", encoding="""utf-8""") as f:
        f.write(lowercase__)
    return path


# Path to the checked-in RGB test image.
@pytest.fixture(scope="""session""")
def _snake_case() -> int:
    '''simple docstring'''
    return os.path.join("""tests""", """features""", """data""", """test_image_rgb.jpg""")


# Path to the checked-in 44.1 kHz test audio file.
@pytest.fixture(scope="""session""")
def _snake_case() -> Tuple:
    '''simple docstring'''
    return os.path.join("""tests""", """features""", """data""", """test_audio_44100.wav""")


# zip with two copies of the test image.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Any, lowercase__: Tuple) -> Optional[int]:
    '''simple docstring'''
    lowerCAmelCase_: Tuple = tmp_path_factory.mktemp("""data""") / """dataset.img.zip"""
    with zipfile.ZipFile(lowercase__, """w""") as f:
        f.write(lowercase__, arcname=os.path.basename(lowercase__))
        f.write(lowercase__, arcname=os.path.basename(lowercase__).replace(""".jpg""", """2.jpg"""))
    return path


# Directory tree with visible and hidden files/subdirectories.
@pytest.fixture(scope="""session""")
def _snake_case(lowercase__: Tuple) -> Dict:
    '''simple docstring'''
    lowerCAmelCase_: int = tmp_path_factory.mktemp("""data_dir""")
    (data_dir / "subdir").mkdir()
    with open(data_dir / """subdir""" / """train.txt""", """w""") as f:
        f.write("""foo\n""" * 1_0)
    with open(data_dir / """subdir""" / """test.txt""", """w""") as f:
        f.write("""bar\n""" * 1_0)
    # hidden file
    with open(data_dir / """subdir""" / """.test.txt""", """w""") as f:
        f.write("""bar\n""" * 1_0)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / """.subdir""" / """train.txt""", """w""") as f:
        f.write("""foo\n""" * 1_0)
    with open(data_dir / """.subdir""" / """test.txt""", """w""") as f:
        f.write("""bar\n""" * 1_0)
    return data_dir
84
0
"""Project Euler problem 7: find the 10001st prime number."""
from math import sqrt


def is_prime(number: int) -> bool:
    """Trial-division primality test over 6k +/- 1 candidates.

    (Original defined both functions as ``_lowerCamelCase`` while calling
    them as ``is_prime``/``solution``; names restored to the call sites.)
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # Every remaining prime has the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the *nth* prime number (default: the 10001st)."""
    count = 0
    number = 1
    # Handle 2 and 3 first so the main loop can step over even numbers.
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
57
"""Data2VecText model configuration (BERT/RoBERTa-style hyperparameters)."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


# NOTE(review): the original file declared every __init__ parameter as `__A`
# (a SyntaxError), assigned `classifier_dropout` from an undefined name, and
# gave both classes the same mangled name; canonical names restored below.
class Data2VecTextConfig(PretrainedConfig):
    """Configuration class storing the hyperparameters of a Data2VecText model.

    Defaults reproduce the original positional values (vocab 30522, hidden
    768, 12 layers/heads, GELU, 512 positions, etc.).
    """

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    """ONNX export configuration: declares the model's dynamic input axes."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
84
0
def multiply(a: int, b: int) -> int:
    """Multiply two non-negative integers with the double-and-add
    (Russian peasant) method, using only addition and bit shifts.

    (The original defined both functions as ``a_`` with duplicated
    parameter names, so the second silently shadowed the first; they are
    given distinct descriptive names here.)
    """
    res = 0
    while b > 0:
        if b & 1:  # lowest remaining bit of b contributes one copy of a
            res += a
        a += a  # double a for the next bit
        b >>= 1  # drop the processed bit
    return res


def multiply_mod(a: int, b: int, c: int) -> int:
    """Return ``(a * b) % c`` via double-and-add.

    Operands are reduced mod ``c`` at every step so intermediates stay
    below ``2 * c`` instead of growing without bound (same result).
    """
    a %= c
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a = (a + a) % c
        b >>= 1
    return res
199
"""Convert a native T5X (JAX) checkpoint into a PyTorch `transformers` T5 checkpoint.

NOTE(review): the original file's identifiers were machine-mangled (every
function defined as ``_snake_case`` while being *called* as
``tax_attention_lookup`` etc. — a guaranteed NameError — plus ``tax``/``Ta``
for ``t5x``/``T5`` and erased state-dict keys).  Names and destination keys
below are restored to match the call sites and the upstream transformers
conversion script; verify against it before shipping.
"""
import argparse
import collections

import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return the (k, o, q, v) attention kernels of block *i* under *prefix*."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the (wi, wo) MLP kernels of block *i*.

    v1.1 checkpoints use a gated GeLU with two input projections (wi_0, wi_1);
    in that case ``wi`` is a tuple.
    """
    if split_mlp_wi:
        wi_a = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_b = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_a, wi_b)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer-norm scale of block *i* / *layer_name*."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Map flattened T5X target params onto transformers T5 state-dict keys."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi.
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints; in v1.0 embeddings are used instead).
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Wrap converted numpy arrays into torch tensors and fill tied weights."""
    state_dict = collections.OrderedDict(
        [(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()]
    )

    # Add what is missing (weight tying with the shared embedding matrix).
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:
            # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Load a T5X checkpoint from disk into *model* in place."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False
):
    """Build a T5 model from *config_file*, load T5X weights, save to disk."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint.
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model.
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    args = parser.parse_args()
    # Original read `args.tax_checkpoint_path`, which argparse never sets for
    # the `--t5x_checkpoint_path` flag; fixed to the generated attribute name.
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
84
0
import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class a__ ( unittest.TestCase ): """simple docstring""" def __init__( self , lowercase , lowercase=13 , lowercase=30 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , ) -> List[str]: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A__ = (image_size // patch_size) ** 2 A__ = num_patches + 1 def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , ) return config, pixel_values def UpperCamelCase ( self , lowercase , lowercase ) -> 
List[Any]: '''simple docstring''' A__ = FlaxViTModel(config=__A ) A__ = model(__A ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) A__ = (self.image_size, self.image_size) A__ = (self.patch_size, self.patch_size) A__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def UpperCamelCase ( self , lowercase , lowercase ) -> List[str]: '''simple docstring''' A__ = self.type_sequence_label_size A__ = FlaxViTForImageClassification(config=__A ) A__ = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A__ = 1 A__ = FlaxViTForImageClassification(__A ) A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A__ = model(__A ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' A__ = self.prepare_config_and_inputs() ( A__ ) = config_and_inputs A__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class a__ ( A__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def UpperCamelCase ( self ) -> None: '''simple docstring''' A__ = FlaxViTModelTester(self ) A__ = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 ) def UpperCamelCase ( self ) -> int: '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(__A ) A__ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __A ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): A__ = self._prepare_for_class(__A , __A ) A__ = model_class(__A ) @jax.jit def model_jitted(lowercase , **lowercase ): return model(pixel_values=__A , **__A ) with self.subTest("JIT Enabled" ): A__ = model_jitted(**__A ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): A__ = model_jitted(**__A ).to_tuple() self.assertEqual(len(__A ) , len(__A ) ) for jitted_output, output in zip(__A , __A ): self.assertEqual(jitted_output.shape , output.shape ) @slow def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' for model_class_name in self.all_model_classes: A__ = model_class_name.from_pretrained("google/vit-base-patch16-224" ) A__ = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(__A )
68
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( lowercase__ : Optional[Any] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): lowerCAmelCase_ :Union[str, Any] = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): lowerCAmelCase_ :Any = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase_ :List[str] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] lowerCAmelCase_ :Tuple = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowercase__ )-1}""" ) if "norm" in key: lowerCAmelCase_ :Dict = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase_ :str = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] lowerCAmelCase_ :str = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowercase__ )-1}""" ) if "layer_norm1" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: lowerCAmelCase_ :str = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase_ :List[str] = key[key.find("""block""" ) + len("""block""" )] lowerCAmelCase_ :int = key.replace(f"""block{idx}""" , f"""block.{int(lowercase__ )-1}""" ) if "attn.q" in key: lowerCAmelCase_ :Tuple = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: 
lowerCAmelCase_ :Optional[int] = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: lowerCAmelCase_ :str = key.replace("""attn""" , """attention.self""" ) if "fc1" in key: lowerCAmelCase_ :List[Any] = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: lowerCAmelCase_ :List[str] = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: lowerCAmelCase_ :str = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) lowerCAmelCase_ :Any = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase_ :str = key[key.find("""linear_c""" ) + len("""linear_c""" )] lowerCAmelCase_ :Optional[int] = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowercase__ )-1}""" ) if "bot_conv" in key: lowerCAmelCase_ :Union[str, Any] = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: lowerCAmelCase_ :int = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: lowerCAmelCase_ :str = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: lowerCAmelCase_ :Any = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: lowerCAmelCase_ :List[str] = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: lowerCAmelCase_ :Dict = key.replace("""fusion3""" , """3.fusion""" ) if "fusion" in key and "conv" in key: lowerCAmelCase_ :Any = key.replace("""conv""" , """convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): lowerCAmelCase_ :Tuple = key.replace("""module.last_layer_depth""" , """head.head""" ) lowerCAmelCase_ :List[Any] = value return new_state_dict def _snake_case ( lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values 
(which is a single matrix in the original implementation) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict lowerCAmelCase_ :Optional[Any] = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase_ :Union[str, Any] = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase_ :List[Any] = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase_ :int = kv_bias[config.hidden_sizes[i] :] def _snake_case ( ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCAmelCase_ :Optional[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return image @torch.no_grad() def _snake_case ( lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Dict=False , lowercase__ : List[Any]=None ) -> int: '''simple docstring''' lowerCAmelCase_ :int = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] ) # load image processor (only resize + rescale) lowerCAmelCase_ :Union[str, Any] = GLPNImageProcessor() # prepare image lowerCAmelCase_ :List[Any] = prepare_img() lowerCAmelCase_ :int = image_processor(images=lowercase__ , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict lowerCAmelCase_ :Tuple = torch.load(lowercase__ , map_location=torch.device("""cpu""" ) ) # rename keys lowerCAmelCase_ :Union[str, Any] = rename_keys(lowercase__ ) # key and value matrices need special treatment read_in_k_v(lowercase__ , lowercase__ ) # create HuggingFace model and load state dict lowerCAmelCase_ :List[Any] = GLPNForDepthEstimation(lowercase__ ) model.load_state_dict(lowercase__ ) model.eval() # forward pass lowerCAmelCase_ :Dict = model(lowercase__ ) lowerCAmelCase_ :Tuple = outputs.predicted_depth # verify output 
if model_name is not None: if "nyu" in model_name: lowerCAmelCase_ :Optional[Any] = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: lowerCAmelCase_ :Any = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(f"""Unknown model name: {model_name}""" ) lowerCAmelCase_ :Union[str, Any] = torch.Size([1, 4_8_0, 6_4_0] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , lowercase__ , atol=1E-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowercase__ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowercase__ , ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) parser.add_argument( '--model_name', default='glpn-kitti', type=str, help='Name of the model in case you\'re pushing to the hub.', ) __UpperCAmelCase = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
84
0
from collections import deque from .hash_table import HashTable class _snake_case ( A__ ): '''simple docstring''' def __init__( self: List[Any] ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: List[str] ) -> List[str]: super().__init__(*__A ,**__A ) def A__ ( self: Optional[int] ,lowerCamelCase_: int ,lowerCamelCase_: List[str] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(__A ) UpperCAmelCase_ : List[str] = self.values[key] def A__ ( self: str ) -> Optional[Any]: return ( sum(self.charge_factor - len(__A ) for slot in self.values ) / self.size_table * self.charge_factor ) def A__ ( self: Optional[int] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=None ) -> List[str]: if not ( len(self.values[key] ) == self.charge_factor and self.values.count(__A ) == 0 ): return key return super()._collision_resolution(__A ,__A )
345
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
84
0
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class a_ (unittest.TestCase ): def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=3_0 , snake_case_=4_0_0 , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , snake_case_=True , snake_case_=1 / 2_5_5 , snake_case_=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _lowerCAmelCase : Dict = size if size is not None else {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3} _lowerCAmelCase : int = parent _lowerCAmelCase : Optional[Any] = batch_size _lowerCAmelCase : str = num_channels _lowerCAmelCase : int = min_resolution _lowerCAmelCase : Tuple = max_resolution _lowerCAmelCase : Any = do_resize _lowerCAmelCase : int = size _lowerCAmelCase : Optional[Any] = do_normalize _lowerCAmelCase : Optional[int] = image_mean _lowerCAmelCase : Tuple = image_std _lowerCAmelCase : Any = do_rescale _lowerCAmelCase : List[Any] = rescale_factor _lowerCAmelCase : str = do_pad def __UpperCamelCase ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __UpperCamelCase ( self , snake_case_ , snake_case_=False ): if not batched: _lowerCAmelCase : Optional[Any] = image_inputs[0] if isinstance(__A , Image.Image ): _lowerCAmelCase : int = image.size else: _lowerCAmelCase : Optional[Any] = 
image.shape[1], image.shape[2] if w < h: _lowerCAmelCase : Tuple = int(self.size["""shortest_edge"""] * h / w ) _lowerCAmelCase : List[Any] = self.size["""shortest_edge"""] elif w > h: _lowerCAmelCase : List[str] = self.size["""shortest_edge"""] _lowerCAmelCase : Dict = int(self.size["""shortest_edge"""] * w / h ) else: _lowerCAmelCase : Dict = self.size["""shortest_edge"""] _lowerCAmelCase : Tuple = self.size["""shortest_edge"""] else: _lowerCAmelCase : Union[str, Any] = [] for image in image_inputs: _lowerCAmelCase : int = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _lowerCAmelCase : List[Any] = max(__A , key=lambda snake_case_ : item[0] )[0] _lowerCAmelCase : Any = max(__A , key=lambda snake_case_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class a_ (A__ , unittest.TestCase ): __lowerCAmelCase : int = DetaImageProcessor if is_vision_available() else None def __UpperCamelCase ( self ): _lowerCAmelCase : Tuple = DetaImageProcessingTester(self ) @property def __UpperCamelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase ( self ): _lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , """image_mean""" ) ) self.assertTrue(hasattr(__A , """image_std""" ) ) self.assertTrue(hasattr(__A , """do_normalize""" ) ) self.assertTrue(hasattr(__A , """do_resize""" ) ) self.assertTrue(hasattr(__A , """do_rescale""" ) ) self.assertTrue(hasattr(__A , """do_pad""" ) ) self.assertTrue(hasattr(__A , """size""" ) ) def __UpperCamelCase ( self ): _lowerCAmelCase : str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3} ) self.assertEqual(image_processor.do_pad , __A ) def __UpperCamelCase ( self ): pass def __UpperCamelCase ( self ): # Initialize image_processing 
_lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input _lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values _lowerCAmelCase : List[str] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(__A , batched=__A ) _lowerCAmelCase : Dict = image_processing(__A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCamelCase ( self ): # Initialize image_processing _lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) # Test not batched input _lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values _lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCAmelCase : List[str] = image_processing(__A , return_tensors="""pt""" ).pixel_values _lowerCAmelCase : int = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
expected_height, expected_width, ) , ) def __UpperCamelCase ( self ): # Initialize image_processing _lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input _lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values _lowerCAmelCase : int = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCAmelCase : List[Any] = image_processing(__A , return_tensors="""pt""" ).pixel_values _lowerCAmelCase : Optional[int] = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __UpperCamelCase ( self ): # prepare image and target _lowerCAmelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: _lowerCAmelCase : Dict = json.loads(f.read() ) _lowerCAmelCase : List[Any] = {"""image_id""": 3_9_7_6_9, """annotations""": target} # encode them _lowerCAmelCase : Any = DetaImageProcessor() _lowerCAmelCase : Tuple = image_processing(images=__A , annotations=__A , return_tensors="""pt""" ) # verify pixel values _lowerCAmelCase : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["""pixel_values"""].shape , __A ) _lowerCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1E-4 ) ) # verify area _lowerCAmelCase : Tuple = 
torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) ) # verify boxes _lowerCAmelCase : Dict = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A ) _lowerCAmelCase : List[str] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1E-3 ) ) # verify image_id _lowerCAmelCase : Optional[int] = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) ) # verify is_crowd _lowerCAmelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) ) # verify class_labels _lowerCAmelCase : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) ) # verify orig_size _lowerCAmelCase : List[Any] = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) ) # verify size _lowerCAmelCase : Any = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) ) @slow def __UpperCamelCase ( self ): # prepare image, target and masks_path _lowerCAmelCase : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: _lowerCAmelCase : int = json.loads(f.read() ) _lowerCAmelCase : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 3_9_7_6_9, """segments_info""": target} _lowerCAmelCase : Optional[int] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them _lowerCAmelCase : int = DetaImageProcessor(format="""coco_panoptic""" ) _lowerCAmelCase : 
Union[str, Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="""pt""" ) # verify pixel values _lowerCAmelCase : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["""pixel_values"""].shape , __A ) _lowerCAmelCase : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1E-4 ) ) # verify area _lowerCAmelCase : List[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) ) # verify boxes _lowerCAmelCase : int = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A ) _lowerCAmelCase : Optional[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1E-3 ) ) # verify image_id _lowerCAmelCase : Union[str, Any] = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) ) # verify is_crowd _lowerCAmelCase : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) ) # verify class_labels _lowerCAmelCase : Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) ) # verify masks _lowerCAmelCase : Dict = 8_2_2_8_7_3 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __A ) # verify orig_size _lowerCAmelCase : Union[str, Any] = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) ) # verify size _lowerCAmelCase : List[Any] = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
309
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json', # See all LeViT models at https://huggingface.co/models?filter=levit } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "levit" def __init__( self , __A=224 , __A=3 , __A=3 , __A=2 , __A=1 , __A=16 , __A=[128, 256, 384] , __A=[4, 8, 12] , __A=[4, 4, 4] , __A=[16, 16, 16] , __A=0 , __A=[2, 2, 2] , __A=[2, 2, 2] , __A=0.0_2 , **__A , ) -> Any: super().__init__(**__A ) lowerCAmelCase_ :Tuple = image_size lowerCAmelCase_ :Optional[int] = num_channels lowerCAmelCase_ :Union[str, Any] = kernel_size lowerCAmelCase_ :Optional[Any] = stride lowerCAmelCase_ :Optional[int] = padding lowerCAmelCase_ :Optional[Any] = hidden_sizes lowerCAmelCase_ :Optional[int] = num_attention_heads lowerCAmelCase_ :int = depths lowerCAmelCase_ :List[str] = key_dim lowerCAmelCase_ :str = drop_path_rate lowerCAmelCase_ :Optional[int] = patch_size lowerCAmelCase_ :Union[str, Any] = attention_ratio lowerCAmelCase_ :Dict = mlp_ratio lowerCAmelCase_ :Any = initializer_range lowerCAmelCase_ :Optional[int] = [ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Tuple = version.parse("1.11" ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-4
84
0
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class A_ ( A__ ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = "char" UpperCAmelCase_ : Tuple = "bpe" UpperCAmelCase_ : Optional[Any] = "wp" lowercase__ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class A_ ( A__ ): '''simple docstring''' UpperCAmelCase_ : List[str] = ["image_processor", "char_tokenizer"] UpperCAmelCase_ : str = "ViTImageProcessor" UpperCAmelCase_ : List[Any] = "MgpstrTokenizer" def __init__( self : str , lowercase_ : List[Any]=None , lowercase_ : List[Any]=None , **lowercase_ : int ) -> int: UpperCAmelCase : Dict = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , __A , ) UpperCAmelCase : List[Any] = kwargs.pop('feature_extractor' ) UpperCAmelCase : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) UpperCAmelCase : Optional[Any] = tokenizer UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained('gpt2' ) UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(__A , __A ) def __call__( self : int , lowercase_ : List[str]=None , lowercase_ : int=None , lowercase_ : Optional[int]=None , **lowercase_ : Optional[int] ) -> Dict: if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' 
) if images is not None: UpperCAmelCase : List[str] = self.image_processor(__A , return_tensors=__A , **__A ) if text is not None: UpperCAmelCase : Tuple = self.char_tokenizer(__A , return_tensors=__A , **__A ) if text is None: return inputs elif images is None: return encodings else: UpperCAmelCase : Optional[Any] = encodings["""input_ids"""] return inputs def UpperCAmelCase_ ( self : Tuple , lowercase_ : str ) -> Optional[Any]: UpperCAmelCase : int = sequences UpperCAmelCase : Any = char_preds.size(0 ) UpperCAmelCase : Optional[int] = self._decode_helper(__A , 'char' ) UpperCAmelCase : Dict = self._decode_helper(__A , 'bpe' ) UpperCAmelCase : Union[str, Any] = self._decode_helper(__A , 'wp' ) UpperCAmelCase : Union[str, Any] = [] UpperCAmelCase : str = [] for i in range(__A ): UpperCAmelCase : int = [char_scores[i], bpe_scores[i], wp_scores[i]] UpperCAmelCase : Optional[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] UpperCAmelCase : Dict = scores.index(max(__A ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) UpperCAmelCase : Dict = {} UpperCAmelCase : str = final_strs UpperCAmelCase : str = final_scores UpperCAmelCase : str = char_strs UpperCAmelCase : Any = bpe_strs UpperCAmelCase : List[Any] = wp_strs return out def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict , lowercase_ : List[str] ) -> Union[str, Any]: if format == DecodeType.CHARACTER: UpperCAmelCase : List[str] = self.char_decode UpperCAmelCase : Optional[int] = 1 UpperCAmelCase : Dict = """[s]""" elif format == DecodeType.BPE: UpperCAmelCase : str = self.bpe_decode UpperCAmelCase : int = 2 UpperCAmelCase : Optional[Any] = """#""" elif format == DecodeType.WORDPIECE: UpperCAmelCase : Optional[int] = self.wp_decode UpperCAmelCase : Tuple = 102 UpperCAmelCase : Union[str, Any] = """[SEP]""" else: raise ValueError(f"""Format {format} is not supported.""" ) UpperCAmelCase : Optional[int] = [], [] UpperCAmelCase : List[str] = pred_logits.size(0 ) 
UpperCAmelCase : Any = pred_logits.size(1 ) UpperCAmelCase : Union[str, Any] = pred_logits.topk(1 , dim=-1 , largest=__A , sorted=__A ) UpperCAmelCase : Any = preds_index.view(-1 , __A )[:, 1:] UpperCAmelCase : str = decoder(__A ) UpperCAmelCase : Tuple = torch.nn.functional.softmax(__A , dim=2 ).max(dim=2 ) UpperCAmelCase : Optional[Any] = preds_max_prob[:, 1:] for index in range(__A ): UpperCAmelCase : Optional[int] = preds_str[index].find(__A ) UpperCAmelCase : Optional[int] = preds_str[index][:pred_eos] UpperCAmelCase : List[str] = preds_index[index].cpu().tolist() UpperCAmelCase : List[str] = pred_index.index(__A ) if eos_token in pred_index else -1 UpperCAmelCase : List[Any] = preds_max_prob[index][: pred_eos_index + 1] UpperCAmelCase : List[Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__A ) conf_scores.append(__A ) return dec_strs, conf_scores def UpperCAmelCase_ ( self : List[Any] , lowercase_ : Any ) -> List[str]: UpperCAmelCase : Optional[int] = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(__A )] return decode_strs def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Optional[int] ) -> Tuple: return self.bpe_tokenizer.batch_decode(__A ) def UpperCAmelCase_ ( self : Any , lowercase_ : Optional[int] ) -> List[str]: UpperCAmelCase : List[Any] = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(__A )] return decode_strs
151
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def _snake_case ( lowercase__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Union[str, Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Dict = 0.01 with locka.acquire(): with pytest.raises(lowercase__ ): lowerCAmelCase_ :List[Any] = time.time() locka.acquire(lowercase__ ) assert time.time() - _start > timeout def _snake_case ( lowercase__ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = """a""" * 1_0_0_0 + """.lock""" lowerCAmelCase_ :Optional[Any] = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(""".lock""" ) assert not locka._lock_file.endswith(lowercase__ ) assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5 lowerCAmelCase_ :Any = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(lowercase__ ): locka.acquire(0 )
84
0
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=0.9_99 , _UpperCAmelCase="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(_UpperCAmelCase): return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_UpperCAmelCase): return math.exp(t * -12.0) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''') SCREAMING_SNAKE_CASE = [] for i in range(lowercase__): SCREAMING_SNAKE_CASE = i / num_diffusion_timesteps SCREAMING_SNAKE_CASE = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowercase__) / alpha_bar_fn(lowercase__) , lowercase__)) return torch.tensor(lowercase__ , dtype=torch.floataa) class _snake_case ( A__ , A__ ): _lowercase : Optional[int] = [e.name for e in KarrasDiffusionSchedulers] _lowercase : Optional[Any] = 2 @register_to_config def __init__( self , a = 1000 , a = 0.0_00_85 , a = 0.0_12 , a = "linear" , a = None , a = "epsilon" , a = "linspace" , a = 0 , ) -> Tuple: if trained_betas is not None: SCREAMING_SNAKE_CASE = torch.tensor(__A , dtype=torch.floataa) elif beta_schedule == "linear": SCREAMING_SNAKE_CASE = torch.linspace(__A , __A , __A , dtype=torch.floataa) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
SCREAMING_SNAKE_CASE = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , __A , dtype=torch.floataa) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule SCREAMING_SNAKE_CASE = betas_for_alpha_bar(__A) else: raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''') SCREAMING_SNAKE_CASE = 1.0 - self.betas SCREAMING_SNAKE_CASE = torch.cumprod(self.alphas , dim=0) # set all values self.set_timesteps(__A , __A , __A) def SCREAMING_SNAKE_CASE__ ( self , a , a=None) -> List[str]: if schedule_timesteps is None: SCREAMING_SNAKE_CASE = self.timesteps SCREAMING_SNAKE_CASE = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) if len(self._index_counter) == 0: SCREAMING_SNAKE_CASE = 1 if len(__A) > 1 else 0 else: SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(__A) else timestep SCREAMING_SNAKE_CASE = self._index_counter[timestep_int] return indices[pos].item() @property def SCREAMING_SNAKE_CASE__ ( self) -> str: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def SCREAMING_SNAKE_CASE__ ( self , a , a , ) -> torch.FloatTensor: SCREAMING_SNAKE_CASE = self.index_for_timestep(__A) if self.state_in_first_order: SCREAMING_SNAKE_CASE = self.sigmas[step_index] else: SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index] SCREAMING_SNAKE_CASE = sample / ((sigma**2 + 1) ** 0.5) return sample def SCREAMING_SNAKE_CASE__ ( self , a , a = None , a = None , ) -> Tuple: SCREAMING_SNAKE_CASE = num_inference_steps SCREAMING_SNAKE_CASE = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" 
corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": SCREAMING_SNAKE_CASE = np.linspace(0 , num_train_timesteps - 1 , __A , dtype=__A)[::-1].copy() elif self.config.timestep_spacing == "leading": SCREAMING_SNAKE_CASE = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 SCREAMING_SNAKE_CASE = (np.arange(0 , __A) * step_ratio).round()[::-1].copy().astype(__A) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": SCREAMING_SNAKE_CASE = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 SCREAMING_SNAKE_CASE = (np.arange(__A , 0 , -step_ratio)).round().copy().astype(__A) timesteps -= 1 else: raise ValueError( f'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''') SCREAMING_SNAKE_CASE = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) SCREAMING_SNAKE_CASE = torch.from_numpy(np.log(__A)).to(__A) SCREAMING_SNAKE_CASE = np.interp(__A , np.arange(0 , len(__A)) , __A) SCREAMING_SNAKE_CASE = np.concatenate([sigmas, [0.0]]).astype(np.floataa) SCREAMING_SNAKE_CASE = torch.from_numpy(__A).to(device=__A) # interpolate sigmas SCREAMING_SNAKE_CASE = sigmas.log().lerp(sigmas.roll(1).log() , 0.5).exp() SCREAMING_SNAKE_CASE = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]]) SCREAMING_SNAKE_CASE = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]) if str(__A).startswith('mps'): # mps does not support float64 SCREAMING_SNAKE_CASE = torch.from_numpy(__A).to(__A , dtype=torch.floataa) else: SCREAMING_SNAKE_CASE = torch.from_numpy(__A).to(__A) # interpolate timesteps SCREAMING_SNAKE_CASE = self.sigma_to_t(__A).to(__A , dtype=timesteps.dtype) SCREAMING_SNAKE_CASE = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1).flatten() SCREAMING_SNAKE_CASE = torch.cat([timesteps[:1], interleaved_timesteps]) SCREAMING_SNAKE_CASE = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter SCREAMING_SNAKE_CASE = defaultdict(__A) def SCREAMING_SNAKE_CASE__ ( self , a) -> Union[str, Any]: # get log sigma SCREAMING_SNAKE_CASE = sigma.log() # get distribution SCREAMING_SNAKE_CASE = log_sigma - self.log_sigmas[:, None] # get sigmas range SCREAMING_SNAKE_CASE = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2) SCREAMING_SNAKE_CASE = low_idx + 1 SCREAMING_SNAKE_CASE = self.log_sigmas[low_idx] SCREAMING_SNAKE_CASE = self.log_sigmas[high_idx] # interpolate sigmas SCREAMING_SNAKE_CASE = (low - log_sigma) / (low - high) SCREAMING_SNAKE_CASE = w.clamp(0 , 1) # transform interpolation to time range 
SCREAMING_SNAKE_CASE = (1 - w) * low_idx + w * high_idx SCREAMING_SNAKE_CASE = t.view(sigma.shape) return t @property def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: return self.sample is None def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a = True , ) -> Union[SchedulerOutput, Tuple]: SCREAMING_SNAKE_CASE = self.index_for_timestep(__A) # advance index counter by 1 SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(__A) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: SCREAMING_SNAKE_CASE = self.sigmas[step_index] SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index + 1] SCREAMING_SNAKE_CASE = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method SCREAMING_SNAKE_CASE = self.sigmas[step_index - 1] SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index] SCREAMING_SNAKE_CASE = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_interpol SCREAMING_SNAKE_CASE = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_interpol SCREAMING_SNAKE_CASE = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('prediction_type not implemented yet: sample') else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''') if self.state_in_first_order: # 2. 
Convert to an ODE derivative for 1st order SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_hat # 3. delta timestep SCREAMING_SNAKE_CASE = sigma_interpol - sigma_hat # store for 2nd order step SCREAMING_SNAKE_CASE = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep SCREAMING_SNAKE_CASE = sigma_next - sigma_hat SCREAMING_SNAKE_CASE = self.sample SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__A) def SCREAMING_SNAKE_CASE__ ( self , a , a , a , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples SCREAMING_SNAKE_CASE = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype) if original_samples.device.type == "mps" and torch.is_floating_point(__A): # mps does not support float64 SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device , dtype=torch.floataa) SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device , dtype=torch.floataa) else: SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device) SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device) SCREAMING_SNAKE_CASE = [self.index_for_timestep(__A , __A) for t in timesteps] SCREAMING_SNAKE_CASE = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): SCREAMING_SNAKE_CASE = sigma.unsqueeze(-1) SCREAMING_SNAKE_CASE = original_samples + noise * sigma return noisy_samples def __len__( self) -> List[str]: return self.config.num_train_timesteps
137
"""simple docstring""" from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __UpperCAmelCase = 1.054571817e-34 # unit of ℏ : J * s __UpperCAmelCase = 3e8 # unit of c : m * s^-1 def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> dict[str, float]: '''simple docstring''' if (force, area, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if force < 0: raise ValueError("""Magnitude of force can not be negative""" ) if distance < 0: raise ValueError("""Distance can not be negative""" ) if area < 0: raise ValueError("""Area can not be negative""" ) if force == 0: lowerCAmelCase_ :Union[str, Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_4_0 * (distance) ** 4 ) return {"force": force} elif area == 0: lowerCAmelCase_ :Optional[Any] = (2_4_0 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: lowerCAmelCase_ :Any = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("""One and only one argument must be 0""" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
84
0
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
338
"""simple docstring""" def _snake_case ( lowercase__ : str , lowercase__ : str ) -> int: '''simple docstring''' if len(lowercase__ ) != len(lowercase__ ): raise ValueError("""String lengths must match!""" ) lowerCAmelCase_ :Optional[int] = 0 for chara, chara in zip(lowercase__ , lowercase__ ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
84
0
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __A( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ (self ): # clean up the VRAM after each test super().tearDown() gc.collect() def UpperCAmelCase_ (self ): UpperCamelCase__ = FlaxStableDiffusionPipeline.from_pretrained( """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , ) UpperCamelCase__ = """A painting of a squirrel eating a burger""" UpperCamelCase__ = jax.device_count() UpperCamelCase__ = num_samples * [prompt] UpperCamelCase__ = sd_pipe.prepare_inputs(__A ) UpperCamelCase__ = replicate(__A ) UpperCamelCase__ = shard(__A ) UpperCamelCase__ = jax.random.PRNGKey(0 ) UpperCamelCase__ = jax.random.split(__A , jax.device_count() ) UpperCamelCase__ = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3) UpperCamelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) UpperCamelCase__ = images[0, 2_53:2_56, 2_53:2_56, -1] UpperCamelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) UpperCamelCase__ = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.4_5508, 0.4512] ) print(F"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def UpperCAmelCase_ (self ): UpperCamelCase__ = """stabilityai/stable-diffusion-2""" UpperCamelCase__ = FlaxDPMSolverMultistepScheduler.from_pretrained(__A , subfolder="""scheduler""" ) UpperCamelCase__ = FlaxStableDiffusionPipeline.from_pretrained( __A , scheduler=__A , revision="""bf16""" , dtype=jnp.bfloataa , ) UpperCamelCase__ = scheduler_params 
UpperCamelCase__ = """A painting of a squirrel eating a burger""" UpperCamelCase__ = jax.device_count() UpperCamelCase__ = num_samples * [prompt] UpperCamelCase__ = sd_pipe.prepare_inputs(__A ) UpperCamelCase__ = replicate(__A ) UpperCamelCase__ = shard(__A ) UpperCamelCase__ = jax.random.PRNGKey(0 ) UpperCamelCase__ = jax.random.split(__A , jax.device_count() ) UpperCamelCase__ = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3) UpperCamelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) UpperCamelCase__ = images[0, 2_53:2_56, 2_53:2_56, -1] UpperCamelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) UpperCamelCase__ = jnp.array([0.4336, 0.4_2969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] ) print(F"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
244
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A ) -> Optional[Any]: super().__init__() lowerCAmelCase_ :int = nn.ModuleList(__A ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A = None , __A = None , __A = None , __A = None , __A = False , __A = True , ) -> Union[ControlNetOutput, Tuple]: for i, (image, scale, controlnet) in enumerate(zip(__A , __A , self.nets ) ): lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = controlnet( __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) # merge samples if i == 0: lowerCAmelCase_ , lowerCAmelCase_ :Tuple = down_samples, mid_sample else: lowerCAmelCase_ :str = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__A , __A ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __lowerCAmelCase ( self , __A , __A = True , __A = None , __A = False , __A = None , ) -> Optional[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = save_directory for controlnet in self.nets: controlnet.save_pretrained( __A , is_main_process=__A , save_function=__A , safe_serialization=__A , variant=__A , ) idx += 1 lowerCAmelCase_ :Any = model_path_to_save + f"""_{idx}""" @classmethod def __lowerCAmelCase ( cls , __A , **__A ) -> List[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... 
lowerCAmelCase_ :List[Any] = pretrained_model_path while os.path.isdir(__A ): lowerCAmelCase_ :Tuple = ControlNetModel.from_pretrained(__A , **__A ) controlnets.append(__A ) idx += 1 lowerCAmelCase_ :Dict = pretrained_model_path + f"""_{idx}""" logger.info(f"""{len(__A )} controlnets loaded from {pretrained_model_path}.""" ) if len(__A ) == 0: raise ValueError( f"""No ControlNets found under {os.path.dirname(__A )}. Expected at least {pretrained_model_path + "_0"}.""" ) return cls(__A )
84
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { '''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''', } class a__( A__ ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = "open-llama" def __init__( self , __lowerCAmelCase=100000 , __lowerCAmelCase=4096 , __lowerCAmelCase=11008 , __lowerCAmelCase=32 , __lowerCAmelCase=32 , __lowerCAmelCase="silu" , __lowerCAmelCase=2048 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-6 , __lowerCAmelCase=True , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=2 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=None , **__lowerCAmelCase , ): """simple docstring""" lowerCAmelCase = vocab_size lowerCAmelCase = max_position_embeddings lowerCAmelCase = hidden_size lowerCAmelCase = intermediate_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = initializer_range lowerCAmelCase = rms_norm_eps lowerCAmelCase = use_cache lowerCAmelCase = kwargs.pop( """use_memorry_efficient_attention""" , __A) lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_dropout_prob lowerCAmelCase = use_stable_embedding lowerCAmelCase = shared_input_output_embedding lowerCAmelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , tie_word_embeddings=__A , **__A , ) def a_ ( self): """simple docstring""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __A) or len(self.rope_scaling) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f"got {self.rope_scaling}") lowerCAmelCase = self.rope_scaling.get("""type""" , __A) lowerCAmelCase = 
self.rope_scaling.get("""factor""" , __A) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}") if rope_scaling_factor is None or not isinstance(__A , __A) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
272
"""simple docstring""" from PIL import Image def _snake_case ( lowercase__ : Image , lowercase__ : float ) -> Image: '''simple docstring''' def brightness(lowercase__ : int ) -> float: return 1_2_8 + level + (c - 1_2_8) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(lowercase__ ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 __UpperCAmelCase = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
84
0
from __future__ import annotations def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ): """simple docstring""" if len(lowercase__ ) == 0: raise ValueError('''find_max() arg is an empty sequence''' ) if ( left >= len(lowercase__ ) or left < -len(lowercase__ ) or right >= len(lowercase__ ) or right < -len(lowercase__ ) ): raise IndexError('''list index out of range''' ) if left == right: return nums[left] snake_case = (left + right) >> 1 # the middle snake_case = find_max(lowercase__ ,lowercase__ ,lowercase__ ) # find max in range[left, mid] snake_case = find_max(lowercase__ ,mid + 1 ,lowercase__ ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
127
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class _SCREAMING_SNAKE_CASE : def __lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) lowerCAmelCase_ :int = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :List[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :int = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } 
def __lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ :Dict = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[int] = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Dict = 
self.get_dummy_components() lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Optional[int] = inputs["""prompt"""] lowerCAmelCase_ :Optional[int] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Optional[int] = inputs["""output_type"""] if "image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""image"""] else: lowerCAmelCase_ :int = None if "mask_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""mask_image"""] else: lowerCAmelCase_ :int = None if "original_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""original_image"""] else: lowerCAmelCase_ :List[Any] = None lowerCAmelCase_ , lowerCAmelCase_ :int = pipe.encode_prompt(__A ) # inputs with prompt converted to embeddings lowerCAmelCase_ :List[str] = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :int = image if mask_image is not None: lowerCAmelCase_ :Tuple = mask_image if original_image is not None: lowerCAmelCase_ :Optional[Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(__A , __A , __A ) lowerCAmelCase_ :Optional[int] = pipe(**__A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Optional[int] = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(__A , __A ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) lowerCAmelCase_ :Dict = self.get_dummy_inputs(__A ) 
lowerCAmelCase_ :Union[str, Any] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Tuple = inputs["""output_type"""] # inputs with prompt converted to embeddings lowerCAmelCase_ :Tuple = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :Optional[int] = image if mask_image is not None: lowerCAmelCase_ :str = mask_image if original_image is not None: lowerCAmelCase_ :Tuple = original_image lowerCAmelCase_ :Union[str, Any] = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Any = self.get_dummy_components() lowerCAmelCase_ :Optional[int] = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[int] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Dict = pipe(**__A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Any = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests lowerCAmelCase_ :List[Any] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :str = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 )
84
0
"""Fast BART tokenizer (byte-level BPE) backed by HuggingFace *tokenizers*.

Restored from a name-mangled copy: every local had been clobbered to a single
identifier while the code still read the original names, and the ``mask_token``
property/setter pair was broken.
"""
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


class _UpperCamelCase(PreTrainedTokenizerFast):
    """Fast BART tokenizer.

    Thin wrapper over ``PreTrainedTokenizerFast`` that keeps the byte-level
    pre-tokenizer and post-processor consistent with the ``add_prefix_space`` /
    ``trim_offsets`` flags chosen at construction time.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Re-create the byte-level pre-tokenizer if its add_prefix_space flag
        # disagrees with what the caller asked for.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """Return the mask token as a string, or ``None`` (with an error log)
        when it has never been set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the underlying BPE model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``<s> A </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BART does not use token type ids; return a zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
57
"""Slow Flax Stable Diffusion 2 integration tests.

Restored from a name-mangled copy: all locals had been clobbered to one
identifier, methods shared a single name, and ``jnp.bfloataa`` (nonexistent)
replaced ``jnp.bfloat16``.
"""
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class _SCREAMING_SNAKE_CASE(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        """Run SD2 in bf16 across all local devices and check a pixel slice."""
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        """Same check with the DPM-Solver++ multistep scheduler swapped in."""
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
84
0
def a_ ( SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 1_000 ): '''simple docstring''' _lowerCamelCase : str =1 _lowerCamelCase : str =0 for divide_by_number in range(lowercase__ , digit + 1 ): _lowerCamelCase : list[int] =[] _lowerCamelCase : int =numerator for _ in range(1 , digit + 1 ): if now_divide in has_been_divided: if longest_list_length < len(lowercase__ ): _lowerCamelCase : Tuple =len(lowercase__ ) _lowerCamelCase : int =divide_by_number else: has_been_divided.append(lowercase__ ) _lowerCamelCase : Optional[int] =now_divide * 10 % divide_by_number return the_digit # Tests if __name__ == "__main__": import doctest doctest.testmod()
199
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def _snake_case ( ) -> Generator[int, None, None]: '''simple docstring''' lowerCAmelCase_ :dict[int, int] = {} lowerCAmelCase_ :int = 2 while True: lowerCAmelCase_ :List[Any] = factor_map.pop(lowercase__ , lowercase__ ) if factor: lowerCAmelCase_ :Optional[int] = factor + prime while x in factor_map: x += factor lowerCAmelCase_ :List[str] = factor else: lowerCAmelCase_ :Optional[int] = prime yield prime prime += 1 def _snake_case ( lowercase__ : float = 1E10 ) -> int: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = sieve() lowerCAmelCase_ :str = 1 while True: lowerCAmelCase_ :int = next(lowercase__ ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(lowercase__ ) n += 2 if __name__ == "__main__": print(solution())
84
0
"""Fast RemBERT tokenizer backed by HuggingFace *tokenizers*.

Restored from a name-mangled copy: ``__init__`` had duplicate parameter names
(a SyntaxError), every method shared one name, and the class attributes were
all assigned to a single identifier.
"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class a__(PreTrainedTokenizerFast):
    """Fast RemBERT tokenizer (SentencePiece-based)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # The slow tokenizer can only be re-saved if the sentencepiece model is available.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """``[CLS] A [SEP]`` for one sequence, ``[CLS] A [SEP] B [SEP]`` for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into ``save_directory`` and return its path."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
68
"""ONNX Stable Diffusion x4 upscaler pipeline tests.

Restored from a name-mangled copy: both test classes shared one name (so the
second silently replaced the first) and every local had been clobbered.
"""
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionUpscalePipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        """Deterministic pipeline inputs for the fast CPU tests."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
84
0
from math import asin, atan, cos, radians, sin, sqrt, tan UpperCamelCase_ = 6_3_7_8_1_3_7.0 UpperCamelCase_ = 6_3_5_6_7_5_2.3_1_4_2_4_5 UpperCamelCase_ = 6378137 def lowerCamelCase_ ( _a : float , _a : float , _a : float , _a : float ): '''simple docstring''' UpperCAmelCase_ : List[str] = (AXIS_A - AXIS_B) / AXIS_A UpperCAmelCase_ : str = atan((1 - flattening) * tan(radians(lowercase__ ) ) ) UpperCAmelCase_ : str = atan((1 - flattening) * tan(radians(lowercase__ ) ) ) UpperCAmelCase_ : Union[str, Any] = radians(lowercase__ ) UpperCAmelCase_ : List[Any] = radians(lowercase__ ) # Equation UpperCAmelCase_ : Any = sin((phi_a - phi_a) / 2 ) UpperCAmelCase_ : Tuple = sin((lambda_a - lambda_a) / 2 ) # Square both values sin_sq_phi *= sin_sq_phi sin_sq_lambda *= sin_sq_lambda UpperCAmelCase_ : Any = sqrt(sin_sq_phi + (cos(lowercase__ ) * cos(lowercase__ ) * sin_sq_lambda) ) return 2 * RADIUS * asin(lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod()
345
"""SageMaker multi-node model-parallelism smoke test.

Restored from a name-mangled copy: all methods shared one private name so
``self.create_estimator`` (and even ``setUp``) could never be resolved, and
locals were clobbered.
"""
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class _SCREAMING_SNAKE_CASE(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        # `env` is injected by the `sm_env` pytest fixture.
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator configured for smdistributed model parallelism."""
        # configuration for running training on smdistributed Model Parallel
        smp_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        mpi_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the job's CloudWatch metrics next to the test workspace."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_model_parallelism(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
84
0
'''simple docstring''' UpperCamelCase_ = """\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n""" UpperCamelCase_ = [{"""type""": """code""", """content""": INSTALL_CONTENT}] UpperCamelCase_ = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
309
"""simple docstring""" def _snake_case ( lowercase__ : int = 1_0 ) -> str: '''simple docstring''' if not isinstance(lowercase__ , lowercase__ ) or n < 0: raise ValueError("""Invalid input""" ) lowerCAmelCase_ :List[str] = 1_0**n lowerCAmelCase_ :int = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , lowercase__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"""{solution(10) = }""")
84
0
"""ONNX Stable Diffusion x4 upscaler pipeline tests (second copy).

Restored from a name-mangled copy: both classes were named ``A_`` (so the
nightly class replaced the fast one) and all locals were clobbered. Distinct
class names are used here.
"""
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionUpscalePipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        """Deterministic pipeline inputs for the fast CPU tests."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        return {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.772767, 0.775866, 0.781222, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxUpscalePipelineNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
151
"""simple docstring""" import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __UpperCAmelCase = 'src/transformers' __UpperCAmelCase = 'docs/source/en/tasks' def _snake_case ( lowercase__ : str , lowercase__ : List[str] , lowercase__ : Any ) -> str: '''simple docstring''' with open(lowercase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCAmelCase_ :List[Any] = f.readlines() # Find the start prompt. lowerCAmelCase_ :Tuple = 0 while not lines[start_index].startswith(lowercase__ ): start_index += 1 start_index += 1 lowerCAmelCase_ :Dict = start_index while not lines[end_index].startswith(lowercase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. 
__UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH) __UpperCAmelCase = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in 
`CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). __UpperCAmelCase = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def _snake_case ( lowercase__ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = TASK_GUIDE_TO_MODELS[task_guide] lowerCAmelCase_ :List[Any] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(lowercase__ , set() ) lowerCAmelCase_ :Union[str, Any] = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n" def _snake_case ( lowercase__ : int , lowercase__ : str=False ) -> Dict: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = _find_text_in_file( filename=os.path.join(lowercase__ , lowercase__ ) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , ) lowerCAmelCase_ :int = get_model_list_for_task(lowercase__ ) if current_list != new_list: if overwrite: with open(os.path.join(lowercase__ , lowercase__ ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" """ to fix this.""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __UpperCAmelCase = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
84
0
from statistics import mean import numpy as np def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = 0 # Number of processes finished SCREAMING_SNAKE_CASE = 0 # Displays the finished process. # If it is 0, the performance is completed if it is 1, before the performance. SCREAMING_SNAKE_CASE = [0] * no_of_process # List to include calculation results SCREAMING_SNAKE_CASE = [0] * no_of_process # Sort by arrival time. SCREAMING_SNAKE_CASE = [burst_time[i] for i in np.argsort(lowercase__)] SCREAMING_SNAKE_CASE = [process_name[i] for i in np.argsort(lowercase__)] arrival_time.sort() while no_of_process > finished_process_count: SCREAMING_SNAKE_CASE = 0 while finished_process[i] == 1: i += 1 if current_time < arrival_time[i]: SCREAMING_SNAKE_CASE = arrival_time[i] SCREAMING_SNAKE_CASE = 0 # Index showing the location of the process being performed SCREAMING_SNAKE_CASE = 0 # Saves the current response ratio. SCREAMING_SNAKE_CASE = 0 for i in range(0 , lowercase__): if finished_process[i] == 0 and arrival_time[i] <= current_time: SCREAMING_SNAKE_CASE = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[ i ] if response_ratio < temp: SCREAMING_SNAKE_CASE = temp SCREAMING_SNAKE_CASE = i # Calculate the turn around time SCREAMING_SNAKE_CASE = current_time + burst_time[loc] - arrival_time[loc] current_time += burst_time[loc] # Indicates that the process has been performed. 
SCREAMING_SNAKE_CASE = 1 # Increase finished_process_count by 1 finished_process_count += 1 return turn_around_time def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = [0] * no_of_process for i in range(0 , lowercase__): SCREAMING_SNAKE_CASE = turn_around_time[i] - burst_time[i] return waiting_time if __name__ == "__main__": a_ : Dict = 5 a_ : Union[str, Any] = ['A', 'B', 'C', 'D', 'E'] a_ : List[str] = [1, 2, 3, 4, 5] a_ : List[str] = [1, 2, 3, 4, 5] a_ : Tuple = calculate_turn_around_time( process_name, arrival_time, burst_time, no_of_process ) a_ : Optional[int] = calculate_waiting_time( process_name, turn_around_time, burst_time, no_of_process ) print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time') for i in range(0, no_of_process): print( f"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t""" f"""{turn_around_time[i]}\t\t\t{waiting_time[i]}""" ) print(f"""average waiting time : {mean(waiting_time):.5f}""") print(f"""average turn around time : {mean(turn_around_time):.5f}""")
137
"""simple docstring""" def _snake_case ( lowercase__ : list[int] ) -> list[list[int]]: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = [] if len(lowercase__ ) == 1: return [nums.copy()] for _ in range(len(lowercase__ ) ): lowerCAmelCase_ :Optional[Any] = nums.pop(0 ) lowerCAmelCase_ :str = permute(lowercase__ ) for perm in permutations: perm.append(lowercase__ ) result.extend(lowercase__ ) nums.append(lowercase__ ) return result def _snake_case ( lowercase__ : Tuple ) -> List[str]: '''simple docstring''' def backtrack(lowercase__ : str ): if start == len(lowercase__ ) - 1: output.append(nums[:] ) else: for i in range(lowercase__ , len(lowercase__ ) ): lowerCAmelCase_ , lowerCAmelCase_ :str = nums[i], nums[start] backtrack(start + 1 ) lowerCAmelCase_ , lowerCAmelCase_ :str = nums[i], nums[start] # backtrack lowerCAmelCase_ :int = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function __UpperCAmelCase = permutea([1, 2, 3]) print(res) doctest.testmod()
84
0
class lowercase_ : # Public class to implement a graph """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->None: lowerCAmelCase = row lowerCAmelCase = col lowerCAmelCase = graph def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->bool: return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->None: # Checking all 8 elements surrounding nth element lowerCAmelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order lowerCAmelCase = [-1, 0, 1, -1, 1, -1, 0, 1] lowerCAmelCase = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , __A ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , __A ) def SCREAMING_SNAKE_CASE_ ( self ) ->int: # And finally, count all islands. lowerCAmelCase = [[False for j in range(self.COL )] for i in range(self.ROW )] lowerCAmelCase = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(__A , __A , __A ) count += 1 return count
338
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Any = BioGptTokenizer UpperCAmelCase_ :str = False def __lowerCAmelCase ( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ :Optional[Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] lowerCAmelCase_ :str = dict(zip(__A , range(len(__A ) ) ) ) lowerCAmelCase_ :int = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" ) as fp: fp.write(json.dumps(__A ) ) with open(self.merges_file , """w""" ) as fp: fp.write("""\n""".join(__A ) ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: lowerCAmelCase_ :List[Any] = """lower newer""" lowerCAmelCase_ :Tuple = """lower newer""" return input_text, output_text def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :List[str] = BioGptTokenizer(self.vocab_file , self.merges_file ) lowerCAmelCase_ :Union[str, Any] = """lower""" lowerCAmelCase_ :Any = ["""low""", """er</w>"""] lowerCAmelCase_ :Union[str, Any] = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Dict = tokens + ["""<unk>"""] lowerCAmelCase_ :List[str] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) @slow def __lowerCAmelCase ( self ) -> List[Any]: 
lowerCAmelCase_ :Optional[Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) lowerCAmelCase_ :List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__A ) lowerCAmelCase_ :List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A ) lowerCAmelCase_ :Optional[int] = tokenizer.build_inputs_with_special_tokens(__A ) lowerCAmelCase_ :List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
84
0
import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json lowerCamelCase_ = '''sshleifer/mar_enro_6_3_student''' class __A( A__ ): """simple docstring""" def UpperCAmelCase_ (self ): super().setUp() UpperCamelCase__ = cached_path( """https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=__A , ) UpperCamelCase__ = F"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k" @slow @require_torch_gpu def UpperCAmelCase_ (self ): MarianMTModel.from_pretrained(__A ) @slow @require_torch_gpu def UpperCAmelCase_ (self ): UpperCamelCase__ = { """$MAX_LEN""": 64, """$BS""": 64, """$GAS""": 1, """$ENRO_DIR""": self.data_dir, """facebook/mbart-large-cc25""": MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", """--learning_rate=3e-5""": """--learning_rate 3e-4""", """--num_train_epochs 6""": """--num_train_epochs 1""", } # Clean up bash script UpperCamelCase__ = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip() UpperCamelCase__ = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" ) for k, v in env_vars_to_replace.items(): UpperCamelCase__ = bash_script.replace(__A , str(__A ) ) UpperCamelCase__ = self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") UpperCamelCase__ = F"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split() 
# XXX: args.gpus > 1 : handle multi_gpu in the future UpperCamelCase__ = ["""finetune.py"""] + bash_script.split() + args with patch.object(__A , """argv""" , __A ): UpperCamelCase__ = argparse.ArgumentParser() UpperCamelCase__ = pl.Trainer.add_argparse_args(__A ) UpperCamelCase__ = SummarizationModule.add_model_specific_args(__A , os.getcwd() ) UpperCamelCase__ = parser.parse_args() UpperCamelCase__ = main(__A ) # Check metrics UpperCamelCase__ = load_json(model.metrics_save_path ) UpperCamelCase__ = metrics["""val"""][0] UpperCamelCase__ = metrics["""val"""][-1] self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , __A ) self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.01 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 ) # 3. test BLEU and val BLEU within ~1.1 pt. self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict UpperCamelCase__ = os.listdir(__A ) UpperCamelCase__ = [x for x in contents if x.endswith(""".ckpt""" )][0] UpperCamelCase__ = os.path.join(args.output_dir , __A ) UpperCamelCase__ = torch.load(__A , map_location="""cpu""" ) UpperCamelCase__ = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight""" assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. 
if args.do_predict: UpperCamelCase__ = {os.path.basename(__A ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics["""test"""] ) == 1 class __A( A__ ): """simple docstring""" @timeout_decorator.timeout(6_00 ) @slow @require_torch_gpu def UpperCAmelCase_ (self ): UpperCamelCase__ = F"{self.test_file_dir_str}/test_data/wmt_en_ro" UpperCamelCase__ = { """--fp16_opt_level=O1""": """""", """$MAX_LEN""": 1_28, """$BS""": 16, """$GAS""": 1, """$ENRO_DIR""": data_dir, """$m""": """sshleifer/student_marian_en_ro_6_1""", """val_check_interval=0.25""": """val_check_interval=1.0""", } # Clean up bash script UpperCamelCase__ = ( (self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip() ) UpperCamelCase__ = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" ) UpperCamelCase__ = bash_script.replace("""--fp16 """ , """ """ ) for k, v in env_vars_to_replace.items(): UpperCamelCase__ = bash_script.replace(__A , str(__A ) ) UpperCamelCase__ = self.get_auto_remove_tmp_dir() UpperCamelCase__ = bash_script.replace("""--fp16""" , """""" ) UpperCamelCase__ = 6 UpperCamelCase__ = ( ["""distillation.py"""] + bash_script.split() + [ F"--output_dir={output_dir}", """--gpus=1""", """--learning_rate=1e-3""", F"--num_train_epochs={epochs}", """--warmup_steps=10""", """--val_check_interval=1.0""", """--do_predict""", ] ) with patch.object(__A , """argv""" , __A ): UpperCamelCase__ = argparse.ArgumentParser() UpperCamelCase__ = pl.Trainer.add_argparse_args(__A ) UpperCamelCase__ = SummarizationDistiller.add_model_specific_args(__A , os.getcwd() ) UpperCamelCase__ = parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu UpperCamelCase__ = distill_main(__A ) # Check metrics UpperCamelCase__ = load_json(model.metrics_save_path ) UpperCamelCase__ = metrics["""val"""][0] UpperCamelCase__ = 
metrics["""val"""][-1] assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.01 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , __A ) # check lightning ckpt can be loaded and has a reasonable statedict UpperCamelCase__ = os.listdir(__A ) UpperCamelCase__ = [x for x in contents if x.endswith(""".ckpt""" )][0] UpperCamelCase__ = os.path.join(args.output_dir , __A ) UpperCamelCase__ = torch.load(__A , map_location="""cpu""" ) UpperCamelCase__ = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight""" assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: UpperCamelCase__ = {os.path.basename(__A ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics["""test"""] ) == 1
244
"""simple docstring""" from ...configuration_utils import PretrainedConfig class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "bert-generation" def __init__( self , __A=5_0358 , __A=1024 , __A=24 , __A=16 , __A=4096 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.0_2 , __A=1E-12 , __A=0 , __A=2 , __A=1 , __A="absolute" , __A=True , **__A , ) -> Tuple: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ :Any = vocab_size lowerCAmelCase_ :List[Any] = hidden_size lowerCAmelCase_ :Optional[int] = num_hidden_layers lowerCAmelCase_ :int = num_attention_heads lowerCAmelCase_ :List[Any] = hidden_act lowerCAmelCase_ :Optional[Any] = intermediate_size lowerCAmelCase_ :List[Any] = hidden_dropout_prob lowerCAmelCase_ :int = attention_probs_dropout_prob lowerCAmelCase_ :Tuple = max_position_embeddings lowerCAmelCase_ :List[str] = initializer_range lowerCAmelCase_ :Union[str, Any] = layer_norm_eps lowerCAmelCase_ :List[str] = position_embedding_type lowerCAmelCase_ :Optional[int] = use_cache
84
0
'''simple docstring''' import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class a__( A__ ): '''simple docstring''' def __init__( self): """simple docstring""" lowerCAmelCase = [] def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" self.events.append("""on_init_end""") def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" self.events.append("""on_train_begin""") def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" self.events.append("""on_train_end""") def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" self.events.append("""on_epoch_begin""") def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" self.events.append("""on_epoch_end""") def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" self.events.append("""on_step_begin""") def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" self.events.append("""on_step_end""") def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" self.events.append("""on_evaluate""") def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" 
self.events.append("""on_predict""") def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" self.events.append("""on_save""") def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" self.events.append("""on_log""") def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" self.events.append("""on_prediction_step""") @require_torch class a__( unittest.TestCase ): '''simple docstring''' def a_ ( self): """simple docstring""" lowerCAmelCase = tempfile.mkdtemp() def a_ ( self): """simple docstring""" shutil.rmtree(self.output_dir) def a_ ( self , __lowerCAmelCase=0 , __lowerCAmelCase=0 , __lowerCAmelCase=64 , __lowerCAmelCase=64 , __lowerCAmelCase=None , __lowerCAmelCase=False , **__lowerCAmelCase): """simple docstring""" lowerCAmelCase = RegressionDataset(length=__A) lowerCAmelCase = RegressionDataset(length=__A) lowerCAmelCase = RegressionModelConfig(a=__A , b=__A) lowerCAmelCase = RegressionPreTrainedModel(__A) lowerCAmelCase = TrainingArguments(self.output_dir , disable_tqdm=__A , report_to=[] , **__A) return Trainer( __A , __A , train_dataset=__A , eval_dataset=__A , callbacks=__A , ) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" self.assertEqual(len(__A) , len(__A)) # Order doesn't matter lowerCAmelCase = sorted(__A , key=lambda __lowerCAmelCase: cb.__name__ if isinstance(__A , __A) else cb.__class__.__name__) lowerCAmelCase = sorted(__A , key=lambda __lowerCAmelCase: cb.__name__ if isinstance(__A , __A) else cb.__class__.__name__) for cba, cba in zip(__A , __A): if isinstance(__A , __A) and isinstance(__A , __A): self.assertEqual(__A , __A) elif isinstance(__A , __A) and not isinstance(__A , __A): self.assertEqual(__A , cba.__class__) elif not isinstance(__A , __A) and isinstance(__A , __A): self.assertEqual(cba.__class__ , __A) else: 
self.assertEqual(__A , __A) def a_ ( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = ["""on_init_end""", """on_train_begin"""] lowerCAmelCase = 0 lowerCAmelCase = len(trainer.get_eval_dataloader()) lowerCAmelCase = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader()) + ["""on_log""", """on_evaluate"""] for _ in range(trainer.state.num_train_epochs): expected_events.append("""on_epoch_begin""") for _ in range(__A): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("""on_log""") if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("""on_save""") expected_events.append("""on_epoch_end""") if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_trainer() lowerCAmelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , __A) # Callbacks passed at init are added to the default callbacks lowerCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback]) expected_callbacks.append(__A) self.check_callbacks_equality(trainer.callback_handler.callbacks , __A) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback lowerCAmelCase = self.get_trainer(disable_tqdm=__A) lowerCAmelCase = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , __A) def a_ ( self): """simple docstring""" lowerCAmelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback] lowerCAmelCase = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(__A) expected_callbacks.remove(__A) 
self.check_callbacks_equality(trainer.callback_handler.callbacks , __A) lowerCAmelCase = self.get_trainer() lowerCAmelCase = trainer.pop_callback(__A) self.assertEqual(cb.__class__ , __A) self.check_callbacks_equality(trainer.callback_handler.callbacks , __A) trainer.add_callback(__A) expected_callbacks.insert(0 , __A) self.check_callbacks_equality(trainer.callback_handler.callbacks , __A) # We can also add, pop, or remove by instance lowerCAmelCase = self.get_trainer() lowerCAmelCase = trainer.callback_handler.callbacks[0] trainer.remove_callback(__A) expected_callbacks.remove(__A) self.check_callbacks_equality(trainer.callback_handler.callbacks , __A) lowerCAmelCase = self.get_trainer() lowerCAmelCase = trainer.callback_handler.callbacks[0] lowerCAmelCase = trainer.pop_callback(__A) self.assertEqual(__A , __A) self.check_callbacks_equality(trainer.callback_handler.callbacks , __A) trainer.add_callback(__A) expected_callbacks.insert(0 , __A) self.check_callbacks_equality(trainer.callback_handler.callbacks , __A) def a_ ( self): """simple docstring""" import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="""ignore""" , category=__A) lowerCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback]) trainer.train() lowerCAmelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(__A , self.get_expected_events(__A)) # Independent log/save/eval lowerCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5) trainer.train() lowerCAmelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(__A , self.get_expected_events(__A)) lowerCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5) trainer.train() lowerCAmelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(__A , self.get_expected_events(__A)) lowerCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , 
evaluation_strategy="""steps""") trainer.train() lowerCAmelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(__A , self.get_expected_events(__A)) lowerCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""") trainer.train() lowerCAmelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(__A , self.get_expected_events(__A)) # A bit of everything lowerCAmelCase = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , ) trainer.train() lowerCAmelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(__A , self.get_expected_events(__A)) # warning should be emitted for duplicated callbacks with patch("""transformers.trainer_callback.logger.warning""") as warn_mock: lowerCAmelCase = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(__A) in warn_mock.call_args[0][0]
272
"""simple docstring""" def _snake_case ( lowercase__ : List[Any] , lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : Any ) -> int: '''simple docstring''' lowerCAmelCase_ :int = [False] * len(lowercase__ ) lowerCAmelCase_ :str = [] queue.append(lowercase__ ) lowerCAmelCase_ :Any = True while queue: lowerCAmelCase_ :Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase__ ) lowerCAmelCase_ :Union[str, Any] = True lowerCAmelCase_ :int = u return visited[t] def _snake_case ( lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : str ) -> Dict: '''simple docstring''' lowerCAmelCase_ :List[Any] = [-1] * (len(lowercase__ )) lowerCAmelCase_ :str = 0 while bfs(lowercase__ , lowercase__ , lowercase__ , lowercase__ ): lowerCAmelCase_ :List[str] = float("""Inf""" ) lowerCAmelCase_ :List[str] = sink while s != source: # Find the minimum value in select path lowerCAmelCase_ :Any = min(lowercase__ , graph[parent[s]][s] ) lowerCAmelCase_ :Union[str, Any] = parent[s] max_flow += path_flow lowerCAmelCase_ :Tuple = sink while v != source: lowerCAmelCase_ :List[str] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowerCAmelCase_ :Union[str, Any] = parent[v] return max_flow __UpperCAmelCase = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __UpperCAmelCase , __UpperCAmelCase = 0, 5 print(ford_fulkerson(graph, source, sink))
84
0
from typing import List

from .keymap import KEYMAP, get_character


# NOTE(review): identifiers in this module appear machine-mangled (the
# decorators assign to `snake_case` but then read `handle`/`func`/`key`,
# and the class inherits from its own not-yet-defined name).  Comments
# describe the apparent intent; confirm against the original source.
def UpperCAmelCase__ (UpperCamelCase_ ):
    """Decorator factory: tag a function as the handler for one key."""

    def decorator(UpperCamelCase_ ):
        # Append the key to the function's `handle_key` attribute list.
        snake_case = getattr(lowercase__ ,'''handle_key''' ,[] )
        handle += [key]
        setattr(lowercase__ ,'''handle_key''' ,lowercase__ )
        return func

    return decorator


def UpperCAmelCase__ (*UpperCamelCase_ ):
    """Decorator factory: tag a function as the handler for several keys."""

    def decorator(UpperCamelCase_ ):
        snake_case = getattr(lowercase__ ,'''handle_key''' ,[] )
        handle += keys
        setattr(lowercase__ ,'''handle_key''' ,lowercase__ )
        return func

    return decorator


class A__ ( A__ ):
    """Registry metaclass: collects methods tagged via `handle_key` into a
    class-level `key_handler` mapping and installs `handle_input`."""

    def __new__( cls , __snake_case , __snake_case , __snake_case ):
        snake_case = super().__new__(cls , __A , __A , __A )
        if not hasattr(__A , '''key_handler''' ):
            setattr(__A , '''key_handler''' , {} )
        setattr(__A , '''handle_input''' , KeyHandler.handle_input )
        # Register every attribute that was tagged by the decorators above.
        for value in attrs.values():
            snake_case = getattr(__A , '''handle_key''' , [] )
            for key in handled_keys:
                snake_case = value
        return new_cls

    @staticmethod
    def a_ ( cls ):
        # Read one character; dispatch to the registered handler, if any.
        snake_case = get_character()
        if char != KEYMAP["undefined"]:
            snake_case = ord(__A )
        snake_case = cls.key_handler.get(__A )
        if handler:
            snake_case = char
            return handler(cls )
        else:
            return None


def UpperCAmelCase__ (cls ):
    """Rebuild *cls* through the key-handler registry defined above."""
    return KeyHandler(cls.__name__ ,cls.__bases__ ,cls.__dict__.copy() )
127
"""simple docstring""" import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = 1_0 lowerCAmelCase_ :Optional[int] = datasets.Features( { """tokens""": datasets.Sequence(datasets.Value("""string""" ) ), """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ), """answers""": datasets.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), """id""": datasets.Value("""int64""" ), } ) lowerCAmelCase_ :int = datasets.Dataset.from_dict( { """tokens""": [["""foo"""] * 5] * n, """labels""": [[1] * 5] * n, """answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0, """id""": list(range(lowercase__ ) ), } , features=lowercase__ , ) return dataset @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : int ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return filename # FILE_CONTENT + files __UpperCAmelCase = '\\n Text data.\n Second line of data.' 
@pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> str: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt""" lowerCAmelCase_ :List[Any] = FILE_CONTENT with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> Tuple: '''simple docstring''' import bza lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2""" lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] ) -> Dict: '''simple docstring''' import gzip lowerCAmelCase_ :int = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" ) lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with gzip.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' if datasets.config.LZ4_AVAILABLE: import lza.frame lowerCAmelCase_ :List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4""" lowerCAmelCase_ :int = bytes(lowercase__ , """utf-8""" ) with lza.frame.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : Optional[int] ) -> Any: '''simple docstring''' if datasets.config.PY7ZR_AVAILABLE: import pyazr lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z""" with pyazr.SevenZipFile(lowercase__ , """w""" ) as archive: archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' 
import tarfile lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> str: '''simple docstring''' import lzma lowerCAmelCase_ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz""" lowerCAmelCase_ :Optional[Any] = bytes(lowercase__ , """utf-8""" ) with lzma.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import zipfile lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Tuple: '''simple docstring''' if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst""" lowerCAmelCase_ :Any = bytes(lowercase__ , """utf-8""" ) with zstd.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """file.xml""" lowerCAmelCase_ :Any = textwrap.dedent( """\ <?xml version=\"1.0\" encoding=\"UTF-8\" ?> <tmx version=\"1.4\"> <header segtype=\"sentence\" srclang=\"ca\" /> <body> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv> <tuv 
xml:lang=\"en\"><seg>Content 3</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv> </tu> </body> </tmx>""" ) with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename __UpperCAmelCase = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __UpperCAmelCase = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __UpperCAmelCase = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __UpperCAmelCase = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __UpperCAmelCase = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Union[str, Any]: '''simple docstring''' return DATA_DICT_OF_LISTS @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Any: '''simple docstring''' lowerCAmelCase_ :Tuple = datasets.Dataset.from_dict(lowercase__ ) lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" ) with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con: lowerCAmelCase_ :Union[str, Any] = con.cursor() cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" ) for item in DATA: cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, 
?)""" , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> int: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Optional[int] = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Any: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Dict = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Dict ) -> Union[str, Any]: '''simple docstring''' import bza lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2""" with open(lowercase__ , """rb""" ) as f: lowerCAmelCase_ :Union[str, Any] = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> Optional[Any]: '''simple 
docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) ) f.write(lowercase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : str ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" ) lowerCAmelCase_ :Optional[Any] = pa.schema( { """col_1""": pa.string(), """col_2""": pa.intaa(), """col_3""": pa.floataa(), } ) with open(lowercase__ , """wb""" ) as f: lowerCAmelCase_ :Optional[int] = pq.ParquetWriter(lowercase__ , schema=lowercase__ ) lowerCAmelCase_ :List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ ) writer.write_table(lowercase__ ) writer.close() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Union[str, Any] = {"""data""": DATA} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> List[Any]: '''simple docstring''' 
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Optional[Any] = {"""data""": DATA_DICT_OF_LISTS} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_312: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_STR: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int , lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path 
@pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : List[Any] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[str] ) -> int: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : List[str] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Any = 
tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : str , lowercase__ : List[str] , lowercase__ : int ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Dict = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc""" with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip""" with 
zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : List[str] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename("""unsupported.ext""" ) ) f.write(lowercase__ , arcname=os.path.basename("""unsupported_2.ext""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] ) lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" ) with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> int: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" ) @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Tuple: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" ) @pytest.fixture(scope="""session""" ) def 
_snake_case ( lowercase__ : Any , lowercase__ : Tuple ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace(""".jpg""" , """2.jpg""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data_dir""" ) (data_dir / "subdir").mkdir() with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden file with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) return data_dir
84
0
"""simple docstring""" from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 10**-10 ): '''simple docstring''' __lowerCAmelCase = a while True: __lowerCAmelCase = Decimal(lowercase__ ) - ( Decimal(eval(lowercase__ ) ) / Decimal(eval(str(diff(lowercase__ ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(lowercase__ ) ) < precision: # noqa: S307 return float(lowercase__ ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''') # Find root of polynomial print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''') # Find Square Root of 5 print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''') # Exponential Roots print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
57
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json', } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Optional[Any] = "data2vec-text" def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.0_2 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> Tuple: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ :Dict = vocab_size lowerCAmelCase_ :Dict = hidden_size lowerCAmelCase_ :int = num_hidden_layers lowerCAmelCase_ :List[Any] = num_attention_heads lowerCAmelCase_ :Any = hidden_act lowerCAmelCase_ :Optional[int] = intermediate_size lowerCAmelCase_ :str = hidden_dropout_prob lowerCAmelCase_ :Any = attention_probs_dropout_prob lowerCAmelCase_ :str = max_position_embeddings lowerCAmelCase_ :int = type_vocab_size lowerCAmelCase_ :Tuple = initializer_range lowerCAmelCase_ :List[Any] = layer_norm_eps lowerCAmelCase_ :List[Any] = position_embedding_type lowerCAmelCase_ :List[Any] = use_cache lowerCAmelCase_ :List[Any] = classifier_dropout class _SCREAMING_SNAKE_CASE ( A__ ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowerCAmelCase_ :List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowerCAmelCase_ :List[str] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
84
0
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCamelCase = logging.get_logger(__name__)

# NOTE(review): this name collides with the logger above, and the __init__
# parameters below were all anonymized to `lowercase_` — identifiers look
# machine-mangled; the intended names are the right-hand sides of the
# assignments.  Confirm against the upstream source.
lowerCamelCase = {
    'microsoft/unispeech-large-1500h-cv': (
        'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class A ( A__ ):
    """Configuration holding all UniSpeech model hyper-parameters."""

    # Model-type identifier used by the auto classes.
    UpperCamelCase__ : List[Any] ="unispeech"

    def __init__( self : str , lowercase_ : Optional[int]=32 , lowercase_ : Union[str, Any]=768 , lowercase_ : List[Any]=12 , lowercase_ : Dict=12 , lowercase_ : Union[str, Any]=3072 , lowercase_ : List[str]="gelu" , lowercase_ : Optional[Any]=0.1 , lowercase_ : int=0.1 , lowercase_ : str=0.1 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Any=0.1 , lowercase_ : str=0.1 , lowercase_ : List[Any]=0.02 , lowercase_ : Optional[int]=1E-5 , lowercase_ : Dict="group" , lowercase_ : List[str]="gelu" , lowercase_ : Tuple=(512, 512, 512, 512, 512, 512, 512) , lowercase_ : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , lowercase_ : int=(10, 3, 3, 3, 3, 2, 2) , lowercase_ : Union[str, Any]=False , lowercase_ : Optional[Any]=128 , lowercase_ : Union[str, Any]=16 , lowercase_ : Optional[int]=False , lowercase_ : Any=True , lowercase_ : Any=0.05 , lowercase_ : str=10 , lowercase_ : Optional[int]=2 , lowercase_ : Optional[int]=0.0 , lowercase_ : Dict=10 , lowercase_ : Tuple=0 , lowercase_ : Dict=320 , lowercase_ : Tuple=2 , lowercase_ : List[str]=0.1 , lowercase_ : Optional[int]=100 , lowercase_ : Dict=256 , lowercase_ : Union[str, Any]=256 , lowercase_ : Any=0.1 , lowercase_ : str="mean" , lowercase_ : Optional[Any]=False , lowercase_ : Any=False , lowercase_ : int=256 , lowercase_ : int=80 , lowercase_ : Any=0 , lowercase_ : str=1 , lowercase_ : Optional[int]=2 , lowercase_ : int=0.5 , **lowercase_ : Tuple , ) -> Tuple:
        """Initialize the configuration; defaults appear to correspond to a
        UniSpeech base-sized architecture — confirm upstream."""
        super().__init__(**__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A )
        _lowerCamelCase : str =hidden_size
        _lowerCamelCase : Dict =feat_extract_norm
        _lowerCamelCase : Optional[int] =feat_extract_activation
        _lowerCamelCase : List[str] =list(__A )
        _lowerCamelCase : Dict =list(__A )
        _lowerCamelCase : int =list(__A )
        _lowerCamelCase : List[str] =conv_bias
        _lowerCamelCase : Tuple =num_conv_pos_embeddings
        _lowerCamelCase : Dict =num_conv_pos_embedding_groups
        _lowerCamelCase : Optional[Any] =len(self.conv_dim )
        _lowerCamelCase : Optional[Any] =num_hidden_layers
        _lowerCamelCase : Any =intermediate_size
        _lowerCamelCase : List[Any] =hidden_act
        _lowerCamelCase : Optional[int] =num_attention_heads
        _lowerCamelCase : str =hidden_dropout
        _lowerCamelCase : str =attention_dropout
        _lowerCamelCase : Optional[Any] =activation_dropout
        _lowerCamelCase : Tuple =feat_proj_dropout
        _lowerCamelCase : Tuple =final_dropout
        _lowerCamelCase : Tuple =layerdrop
        _lowerCamelCase : Optional[int] =layer_norm_eps
        _lowerCamelCase : List[str] =initializer_range
        _lowerCamelCase : Union[str, Any] =num_ctc_classes
        _lowerCamelCase : List[str] =vocab_size
        _lowerCamelCase : int =do_stable_layer_norm
        _lowerCamelCase : Dict =use_weighted_layer_sum
        _lowerCamelCase : Any =classifier_proj_size
        # The three conv descriptions must agree layer-for-layer.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.'''
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        _lowerCamelCase : Dict =apply_spec_augment
        _lowerCamelCase : Optional[Any] =mask_time_prob
        _lowerCamelCase : Dict =mask_time_length
        _lowerCamelCase : int =mask_time_min_masks
        _lowerCamelCase : Optional[Any] =mask_feature_prob
        _lowerCamelCase : Dict =mask_feature_length
        _lowerCamelCase : Any =mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        _lowerCamelCase : str =num_codevectors_per_group
        _lowerCamelCase : Union[str, Any] =num_codevector_groups
        _lowerCamelCase : int =contrastive_logits_temperature
        _lowerCamelCase : List[Any] =feat_quantizer_dropout
        _lowerCamelCase : Optional[int] =num_negatives
        _lowerCamelCase : Optional[Any] =codevector_dim
        _lowerCamelCase : List[Any] =proj_codevector_dim
        _lowerCamelCase : List[Any] =diversity_loss_weight
        # ctc loss
        _lowerCamelCase : Union[str, Any] =ctc_loss_reduction
        _lowerCamelCase : int =ctc_zero_infinity
        # pretraining loss
        _lowerCamelCase : Union[str, Any] =replace_prob

    @property
    def lowerCamelCase ( self : Optional[Any] ) -> List[Any]:
        """Total stride of the convolutional feature extractor (product of
        the per-layer strides)."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
199
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( lowercase__ : Dict , lowercase__ : Dict , lowercase__ : str , lowercase__ : Tuple="attention" ) -> str: '''simple docstring''' lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""] lowerCAmelCase_ :Union[str, Any] = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""] lowerCAmelCase_ :Any = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""] lowerCAmelCase_ :Optional[int] = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""] return k, o, q, v def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : int , lowercase__ : Any=False ) -> int: '''simple docstring''' if split_mlp_wi: lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""] lowerCAmelCase_ :List[str] = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""] lowerCAmelCase_ :Tuple = (wi_a, wi_a) else: lowerCAmelCase_ :List[Any] = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""] lowerCAmelCase_ :Dict = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""] return wi, wo def _snake_case ( lowercase__ : Any , lowercase__ : Dict , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] ) -> Tuple: '''simple docstring''' return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""] def _snake_case ( lowercase__ : dict , *, lowercase__ : int , lowercase__ : bool ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = traverse_util.flatten_dict(variables["""target"""] ) lowerCAmelCase_ :Tuple = {"""/""".join(lowercase__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi lowerCAmelCase_ :Any = """encoder/layers_0/mlp/wi_0/kernel""" in old print("""Split MLP:""" , lowercase__ ) 
lowerCAmelCase_ :List[Any] = collections.OrderedDict() # Shared embeddings. lowerCAmelCase_ :Optional[int] = old["""token_embedder/embedding"""] # Encoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :str = tax_attention_lookup(lowercase__ , lowercase__ , """encoder""" , """attention""" ) lowerCAmelCase_ :Optional[Any] = layer_norm lowerCAmelCase_ :Any = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Tuple = q.T lowerCAmelCase_ :str = v.T # Block i, layer 1 (MLP). lowerCAmelCase_ :Dict = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Any = tax_mlp_lookup(lowercase__ , lowercase__ , """encoder""" , lowercase__ ) lowerCAmelCase_ :Union[str, Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :List[Any] = wi[0].T lowerCAmelCase_ :Dict = wi[1].T else: lowerCAmelCase_ :int = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Tuple = old[ """encoder/relpos_bias/rel_embedding""" ].T lowerCAmelCase_ :List[str] = old["""encoder/encoder_norm/scale"""] if not is_encoder_only: # Decoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). lowerCAmelCase_ :Optional[Any] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_self_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """self_attention""" ) lowerCAmelCase_ :List[Any] = layer_norm lowerCAmelCase_ :List[str] = k.T lowerCAmelCase_ :Any = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :Dict = v.T # Block i, layer 1 (Cross Attention). 
lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_cross_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Tuple = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """encoder_decoder_attention""" ) lowerCAmelCase_ :Optional[int] = layer_norm lowerCAmelCase_ :str = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :int = v.T # Block i, layer 2 (MLP). lowerCAmelCase_ :Any = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Dict = tax_mlp_lookup(lowercase__ , lowercase__ , """decoder""" , lowercase__ ) lowerCAmelCase_ :List[Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :Any = wi[0].T lowerCAmelCase_ :Any = wi[1].T else: lowerCAmelCase_ :Tuple = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Optional[Any] = old["""decoder/decoder_norm/scale"""] lowerCAmelCase_ :Optional[Any] = old[ """decoder/relpos_bias/rel_embedding""" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: lowerCAmelCase_ :Tuple = old["""decoder/logits_dense/kernel"""].T return new def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : bool ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Optional[int] = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Tuple = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. 
print("""Using shared word embeddings as lm_head.""" ) lowerCAmelCase_ :Any = state_dict["""shared.weight"""] return state_dict def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :List[Any] = checkpoints.load_tax_checkpoint(lowercase__ ) lowerCAmelCase_ :Optional[int] = convert_tax_to_pytorch(lowercase__ , num_layers=config.num_layers , is_encoder_only=lowercase__ ) lowerCAmelCase_ :Union[str, Any] = make_state_dict(lowercase__ , lowercase__ ) model.load_state_dict(lowercase__ , strict=lowercase__ ) def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : str , lowercase__ : bool = False ) -> Any: '''simple docstring''' lowerCAmelCase_ :Any = TaConfig.from_json_file(lowercase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: lowerCAmelCase_ :List[Any] = TaEncoderModel(lowercase__ ) else: lowerCAmelCase_ :List[str] = TaForConditionalGeneration(lowercase__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(lowercase__ ) # Verify that we can load the checkpoint. model.from_pretrained(lowercase__ ) print("""Done""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' 
) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) __UpperCAmelCase = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
84
0
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger() @dataclass class a__ : """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = field(default_factory=A__ ) __lowerCamelCase = field(default_factory=A__ ) def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> List[Any]: '''simple docstring''' A__ = len(list(m.modules() ) ) == 1 or isinstance(__A , nn.Convad ) or isinstance(__A , nn.BatchNormad ) if has_not_submodules: self.traced.append(__A ) def __call__( self , lowercase ) -> Union[str, Any]: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(__A ) [x.remove() for x in self.handles] return self @property def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' return list(filter(lambda lowercase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class a__ : """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = 42 __lowerCamelCase = 0 __lowerCamelCase = field(default_factory=A__ ) __lowerCamelCase = field(default_factory=A__ ) def __call__( self , lowercase ) -> List[str]: '''simple docstring''' A__ = Tracker(self.dest )(__A ).parametrized A__ = Tracker(self.src )(__A ).parametrized A__ = list(filter(lambda lowercase : type(__A ) not in self.src_skip , __A ) ) A__ = list(filter(lambda lowercase : type(__A ) not in self.dest_skip , __A ) ) if len(__A ) != len(__A ): raise Exception( F'Numbers of operations are different. Source module has {len(__A )} operations while' F' destination module has {len(__A )}.' 
) for dest_m, src_m in zip(__A , __A ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'Transfered from={src_m} to={dest_m}' ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: ResNetConfig , SCREAMING_SNAKE_CASE_: Path , SCREAMING_SNAKE_CASE_: bool = True ) -> Union[str, Any]: '''simple docstring''' print(F'Converting {name}...' ) with torch.no_grad(): A__ = timm.create_model(lowercase__ , pretrained=lowercase__ ).eval() A__ = ResNetForImageClassification(lowercase__ ).eval() A__ = ModuleTransfer(src=lowercase__ , dest=lowercase__ ) A__ = torch.randn((1, 3, 2_2_4, 2_2_4) ) module_transfer(lowercase__ ) assert torch.allclose(from_model(lowercase__ ) , our_model(lowercase__ ).logits ), "The model logits don't match the original one." A__ = F'resnet{"-".join(name.split("resnet" ) )}' print(lowercase__ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=lowercase__ , ) # we can use the convnext one A__ = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=lowercase__ , ) print(F'Pushed {checkpoint_name}' ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Path , SCREAMING_SNAKE_CASE_: str = None , SCREAMING_SNAKE_CASE_: bool = True ) -> List[Any]: '''simple docstring''' A__ = """imagenet-1k-id2label.json""" A__ = 1_0_0_0 A__ = (1, num_labels) A__ = """huggingface/label-files""" A__ = num_labels A__ = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) ) A__ = {int(lowercase__ ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} A__ = partial(lowercase__ , num_labels=lowercase__ , idalabel=lowercase__ , labelaid=lowercase__ ) A__ = { """resnet18""": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , 
hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="basic" ), """resnet26""": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="bottleneck" ), """resnet34""": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="basic" ), """resnet50""": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="bottleneck" ), """resnet101""": ImageNetPreTrainedConfig( depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="bottleneck" ), """resnet152""": ImageNetPreTrainedConfig( depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="bottleneck" ), } if model_name: convert_weight_and_push(lowercase__ , names_to_config[model_name] , lowercase__ , lowercase__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) return config, expected_shape if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
68
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( lowercase__ : Optional[Any] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): lowerCAmelCase_ :Union[str, Any] = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): lowerCAmelCase_ :Any = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase_ :List[str] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] lowerCAmelCase_ :Tuple = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowercase__ )-1}""" ) if "norm" in key: lowerCAmelCase_ :Dict = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase_ :str = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] lowerCAmelCase_ :str = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowercase__ )-1}""" ) if "layer_norm1" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: lowerCAmelCase_ :str = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase_ :List[str] = key[key.find("""block""" ) + len("""block""" )] lowerCAmelCase_ :int = key.replace(f"""block{idx}""" , f"""block.{int(lowercase__ )-1}""" ) if "attn.q" in key: lowerCAmelCase_ :Tuple = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: 
lowerCAmelCase_ :Optional[int] = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: lowerCAmelCase_ :str = key.replace("""attn""" , """attention.self""" ) if "fc1" in key: lowerCAmelCase_ :List[Any] = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: lowerCAmelCase_ :List[str] = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: lowerCAmelCase_ :str = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) lowerCAmelCase_ :Any = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase_ :str = key[key.find("""linear_c""" ) + len("""linear_c""" )] lowerCAmelCase_ :Optional[int] = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowercase__ )-1}""" ) if "bot_conv" in key: lowerCAmelCase_ :Union[str, Any] = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: lowerCAmelCase_ :int = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: lowerCAmelCase_ :str = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: lowerCAmelCase_ :Any = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: lowerCAmelCase_ :List[str] = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: lowerCAmelCase_ :Dict = key.replace("""fusion3""" , """3.fusion""" ) if "fusion" in key and "conv" in key: lowerCAmelCase_ :Any = key.replace("""conv""" , """convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): lowerCAmelCase_ :Tuple = key.replace("""module.last_layer_depth""" , """head.head""" ) lowerCAmelCase_ :List[Any] = value return new_state_dict def _snake_case ( lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values 
(which is a single matrix in the original implementation) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict lowerCAmelCase_ :Optional[Any] = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase_ :Union[str, Any] = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase_ :List[Any] = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase_ :int = kv_bias[config.hidden_sizes[i] :] def _snake_case ( ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCAmelCase_ :Optional[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return image @torch.no_grad() def _snake_case ( lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Dict=False , lowercase__ : List[Any]=None ) -> int: '''simple docstring''' lowerCAmelCase_ :int = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] ) # load image processor (only resize + rescale) lowerCAmelCase_ :Union[str, Any] = GLPNImageProcessor() # prepare image lowerCAmelCase_ :List[Any] = prepare_img() lowerCAmelCase_ :int = image_processor(images=lowercase__ , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict lowerCAmelCase_ :Tuple = torch.load(lowercase__ , map_location=torch.device("""cpu""" ) ) # rename keys lowerCAmelCase_ :Union[str, Any] = rename_keys(lowercase__ ) # key and value matrices need special treatment read_in_k_v(lowercase__ , lowercase__ ) # create HuggingFace model and load state dict lowerCAmelCase_ :List[Any] = GLPNForDepthEstimation(lowercase__ ) model.load_state_dict(lowercase__ ) model.eval() # forward pass lowerCAmelCase_ :Dict = model(lowercase__ ) lowerCAmelCase_ :Tuple = outputs.predicted_depth # verify output 
if model_name is not None: if "nyu" in model_name: lowerCAmelCase_ :Optional[Any] = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: lowerCAmelCase_ :Any = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(f"""Unknown model name: {model_name}""" ) lowerCAmelCase_ :Union[str, Any] = torch.Size([1, 4_8_0, 6_4_0] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , lowercase__ , atol=1E-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowercase__ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowercase__ , ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) parser.add_argument( '--model_name', default='glpn-kitti', type=str, help='Name of the model in case you\'re pushing to the hub.', ) __UpperCAmelCase = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
84
0
import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Union[str, Any] ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def A__ ( self: Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : Union[str, Any] = 1 UpperCAmelCase_ : Any = 3 UpperCAmelCase_ : Tuple = (32, 32) UpperCAmelCase_ : Dict = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(__A ) return image @property def A__ ( self: Dict ) -> Any: torch.manual_seed(0 ) UpperCAmelCase_ : Dict = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,) return model @property def A__ ( self: List[str] ) -> List[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,) return model @property def A__ ( self: Optional[Any] ) -> Tuple: torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = RobertaSeriesConfig( hidden_size=32 ,project_dim=32 ,intermediate_size=37 
,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5006 ,) return RobertaSeriesModelWithTransformation(__A ) @property def A__ ( self: Any ) -> int: def extract(*lowerCamelCase_: Any ,**lowerCamelCase_: Optional[int] ): class _snake_case : '''simple docstring''' def __init__( self: int ) -> str: UpperCAmelCase_ : List[str] = torch.ones([0] ) def A__ ( self: str ,lowerCamelCase_: List[str] ) -> int: self.pixel_values.to(__A ) return self return Out() return extract def A__ ( self: Optional[Any] ) -> str: UpperCAmelCase_ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : Dict = self.dummy_cond_unet UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=__A ) UpperCAmelCase_ : int = self.dummy_vae UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder UpperCAmelCase_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" ) UpperCAmelCase_ : Dict = 77 UpperCAmelCase_ : Tuple = self.dummy_image.to(__A ) UpperCAmelCase_ : Any = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Dict = AltDiffusionImgaImgPipeline( unet=__A ,scheduler=__A ,vae=__A ,text_encoder=__A ,tokenizer=__A ,safety_checker=__A ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : Dict = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=__A ) UpperCAmelCase_ : Any = alt_pipe.to(__A ) alt_pipe.set_progress_bar_config(disable=__A ) UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : Optional[Any] = torch.Generator(device=__A ).manual_seed(0 ) UpperCAmelCase_ : List[Any] = alt_pipe( [prompt] ,generator=__A ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,image=__A ,) UpperCAmelCase_ : Tuple = output.images UpperCAmelCase_ : str = torch.Generator(device=__A ).manual_seed(0 ) UpperCAmelCase_ : str = alt_pipe( [prompt] ,generator=__A 
,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,image=__A ,return_dict=__A ,)[0] UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1] UpperCAmelCase_ : List[str] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ : str = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3 @unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" ) def A__ ( self: List[Any] ) -> Optional[int]: UpperCAmelCase_ : Tuple = self.dummy_cond_unet UpperCAmelCase_ : int = PNDMScheduler(skip_prk_steps=__A ) UpperCAmelCase_ : int = self.dummy_vae UpperCAmelCase_ : Dict = self.dummy_text_encoder UpperCAmelCase_ : List[Any] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" ) UpperCAmelCase_ : Optional[Any] = 77 UpperCAmelCase_ : Optional[int] = self.dummy_image.to(__A ) # put models in fp16 UpperCAmelCase_ : Any = unet.half() UpperCAmelCase_ : Union[str, Any] = vae.half() UpperCAmelCase_ : Optional[Any] = bert.half() # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Tuple = AltDiffusionImgaImgPipeline( unet=__A ,scheduler=__A ,vae=__A ,text_encoder=__A ,tokenizer=__A ,safety_checker=__A ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=__A ) UpperCAmelCase_ : Union[str, Any] = alt_pipe.to(__A ) alt_pipe.set_progress_bar_config(disable=__A ) UpperCAmelCase_ : int = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = alt_pipe( [prompt] ,generator=__A ,num_inference_steps=2 ,output_type="""np""" ,image=__A ,).images assert image.shape == (1, 32, 32, 3) @unittest.skipIf(torch_device != """cuda""" ,"""This test 
requires a GPU""" ) def A__ ( self: Dict ) -> Optional[int]: UpperCAmelCase_ : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) # resize to resolution that is divisible by 8 but not 16 or 32 UpperCAmelCase_ : Tuple = init_image.resize((760, 504) ) UpperCAmelCase_ : str = """BAAI/AltDiffusion""" UpperCAmelCase_ : str = AltDiffusionImgaImgPipeline.from_pretrained( __A ,safety_checker=__A ,) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) pipe.enable_attention_slicing() UpperCAmelCase_ : Any = """A fantasy landscape, trending on artstation""" UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : str = pipe( prompt=__A ,image=__A ,strength=0.7_5 ,guidance_scale=7.5 ,generator=__A ,output_type="""np""" ,) UpperCAmelCase_ : Dict = output.images[0] UpperCAmelCase_ : List[str] = image[255:258, 383:386, -1] assert image.shape == (504, 760, 3) UpperCAmelCase_ : int = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Tuple ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self: Optional[Any] ) -> Any: UpperCAmelCase_ : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) UpperCAmelCase_ : Union[str, Any] = init_image.resize((768, 512) ) UpperCAmelCase_ : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" ) UpperCAmelCase_ : Union[str, Any] = """BAAI/AltDiffusion""" UpperCAmelCase_ : Any = AltDiffusionImgaImgPipeline.from_pretrained( __A ,safety_checker=__A ,) 
pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) pipe.enable_attention_slicing() UpperCAmelCase_ : Tuple = """A fantasy landscape, trending on artstation""" UpperCAmelCase_ : int = torch.manual_seed(0 ) UpperCAmelCase_ : Tuple = pipe( prompt=__A ,image=__A ,strength=0.7_5 ,guidance_scale=7.5 ,generator=__A ,output_type="""np""" ,) UpperCAmelCase_ : Optional[int] = output.images[0] assert image.shape == (512, 768, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1e-2
345
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
84
0
'''simple docstring''' import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def _UpperCAmelCase ( _lowerCamelCase : Tuple ) -> Dict: _lowerCAmelCase : List[str] = os.path.join(args.tf_model_dir , """parameters.json""" ) _lowerCAmelCase : Tuple = json.loads(open(lowercase__ ).read() ) if not params: raise ValueError( f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' ) if not args.output.endswith(""".pt""" ): _lowerCAmelCase : List[Any] = args.output + """.pt""" _lowerCAmelCase : Optional[int] = OrderedDict() with tf.device("""/CPU:0""" ): _lowerCAmelCase : Tuple = tf.train.load_checkpoint(args.tf_model_dir ) _lowerCAmelCase : List[Any] = reader.get_variable_to_shape_map() for key_name in shapes.keys(): _lowerCAmelCase : List[str] = reader.get_tensor(lowercase__ ).astype(np.floataa ) if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ): continue if key_name.startswith("""pasts/""" ): if key_name.startswith("""pasts/mlp""" ): _lowerCAmelCase : Dict = int(key_name[9] ) elif key_name.startswith("""pasts/out""" ): _lowerCAmelCase : Any = 8 _lowerCAmelCase : Tuple = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time _lowerCAmelCase : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase : List[Any] = torch.tensor(lowercase__ ) elif key_name.startswith("""model/moe""" ): _lowerCAmelCase : Optional[Any] = int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/switch_gating/kernel""" ): _lowerCAmelCase : int = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player _lowerCAmelCase : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase : Union[str, Any] = torch.tensor(lowercase__ ) elif key_name.endswith("""/softmlp/kernel""" ): _lowerCAmelCase : Tuple = 
"""model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player _lowerCAmelCase : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase : Any = torch.tensor(lowercase__ ) elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ): _lowerCAmelCase : Tuple = key_name[-9:-7] for i in range(16 ): _lowerCAmelCase : Dict = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer) _lowerCAmelCase : Optional[Any] = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided _lowerCAmelCase : List[str] = torch.tensor(lowercase__ ) elif key_name.startswith("""model/mlp""" ): _lowerCAmelCase : Optional[Any] = int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/p1/kernel""" ): _lowerCAmelCase : List[str] = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player _lowerCAmelCase : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase : Dict = torch.tensor(lowercase__ ) elif key_name.endswith("""/p1/bias""" ): _lowerCAmelCase : Tuple = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player _lowerCAmelCase : List[str] = vnp.copy() # same because it is one dimensional _lowerCAmelCase : Any = torch.tensor(lowercase__ ) elif key_name.endswith("""/p2/kernel""" ): _lowerCAmelCase : List[Any] = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player _lowerCAmelCase : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase : List[Any] = torch.tensor(lowercase__ ) elif key_name.endswith("""/p2/bias""" ): _lowerCAmelCase : Any = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player _lowerCAmelCase : List[Any] = vnp.copy() # same because it is one dimensional _lowerCAmelCase : Tuple = torch.tensor(lowercase__ ) elif key_name.startswith("""model/ln""" ): _lowerCAmelCase : str = int(key_name[8:].split("""/""" )[0] ) if key_name.endswith("""/b""" ): 
_lowerCAmelCase : Dict = """model.blocks.%d.feed_forward.norm.bias""" % player _lowerCAmelCase : List[Any] = vnp.copy() # same because it is one dimensional _lowerCAmelCase : List[Any] = torch.tensor(lowercase__ ) elif key_name.endswith("""/g""" ): _lowerCAmelCase : Tuple = """model.blocks.%d.feed_forward.norm.weight""" % player _lowerCAmelCase : Tuple = vnp.copy() # same because it is one dimensional _lowerCAmelCase : List[str] = torch.tensor(lowercase__ ) elif key_name.startswith("""model/att""" ): _lowerCAmelCase : Optional[Any] = int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/qkv/kernel""" ): _lowerCAmelCase : Any = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum _lowerCAmelCase : str = state[:, 0, :, :] _lowerCAmelCase : int = state[:, 1, :, :] _lowerCAmelCase : Dict = state[:, 2, :, :] _lowerCAmelCase : Any = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase : Dict = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase : Dict = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase : Optional[Any] = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player _lowerCAmelCase : List[str] = torch.tensor(lowercase__ ) _lowerCAmelCase : List[str] = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player _lowerCAmelCase : Union[str, Any] = torch.tensor(lowercase__ ) _lowerCAmelCase : str = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player _lowerCAmelCase : Optional[int] = torch.tensor(lowercase__ ) elif key_name.endswith("""/o/kernel""" ): _lowerCAmelCase : List[Any] = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player _lowerCAmelCase : int = ( 
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase : Optional[int] = torch.tensor(lowercase__ ) elif key_name.startswith("""model/an""" ): _lowerCAmelCase : Any = int(key_name[8:].split("""/""" )[0] ) if key_name.endswith("""/b""" ): _lowerCAmelCase : Any = """model.blocks.%d.self_attn.norm.bias""" % player _lowerCAmelCase : Optional[int] = vnp.copy() # same because it is one dimensional _lowerCAmelCase : List[Any] = torch.tensor(lowercase__ ) elif key_name.endswith("""/g""" ): _lowerCAmelCase : Dict = """model.blocks.%d.self_attn.norm.weight""" % player _lowerCAmelCase : Dict = vnp.copy() # same because it is one dimensional _lowerCAmelCase : Tuple = torch.tensor(lowercase__ ) elif ( key_name.startswith("""model/wte""" ) or key_name.startswith("""model/wpe""" ) or key_name.startswith("""model/ete""" ) ): _lowerCAmelCase : Optional[Any] = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[ key_name[-3:] ] _lowerCAmelCase : str = """model.%s.weight""" % nlayer _lowerCAmelCase : Union[str, Any] = vnp.copy() # same in embedded _lowerCAmelCase : Optional[Any] = torch.tensor(lowercase__ ) if key_name.startswith("""model/wte""" ): _lowerCAmelCase : Optional[Any] = """lm_head.weight""" _lowerCAmelCase : int = vnp.copy() # same in embedded _lowerCAmelCase : int = torch.tensor(lowercase__ ) elif key_name.startswith("""model/wob""" ): _lowerCAmelCase : Union[str, Any] = """final_logits_bias""" _lowerCAmelCase : Tuple = vnp.copy() # same in embedded _lowerCAmelCase : int = state.reshape((1, -1) ) _lowerCAmelCase : Optional[int] = torch.tensor(lowercase__ ) elif key_name == "model/dense/kernel": _lowerCAmelCase : Optional[int] = """model.last_project.weight""" _lowerCAmelCase : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _lowerCAmelCase : List[str] = torch.tensor(lowercase__ ) elif 
key_name == "model/dense_1/bias": _lowerCAmelCase : Tuple = """model.last_project.bias""" _lowerCAmelCase : List[str] = vnp.copy() # same because it is one dimensional _lowerCAmelCase : List[Any] = torch.tensor(lowercase__ ) torch.save(lowercase__ , args.output ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser( description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""") parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""") UpperCamelCase_ = parser.parse_args() convert_tf_gptsan_to_pt(args)
309
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json', # See all LeViT models at https://huggingface.co/models?filter=levit } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "levit" def __init__( self , __A=224 , __A=3 , __A=3 , __A=2 , __A=1 , __A=16 , __A=[128, 256, 384] , __A=[4, 8, 12] , __A=[4, 4, 4] , __A=[16, 16, 16] , __A=0 , __A=[2, 2, 2] , __A=[2, 2, 2] , __A=0.0_2 , **__A , ) -> Any: super().__init__(**__A ) lowerCAmelCase_ :Tuple = image_size lowerCAmelCase_ :Optional[int] = num_channels lowerCAmelCase_ :Union[str, Any] = kernel_size lowerCAmelCase_ :Optional[Any] = stride lowerCAmelCase_ :Optional[int] = padding lowerCAmelCase_ :Optional[Any] = hidden_sizes lowerCAmelCase_ :Optional[int] = num_attention_heads lowerCAmelCase_ :int = depths lowerCAmelCase_ :List[str] = key_dim lowerCAmelCase_ :str = drop_path_rate lowerCAmelCase_ :Optional[int] = patch_size lowerCAmelCase_ :Union[str, Any] = attention_ratio lowerCAmelCase_ :Dict = mlp_ratio lowerCAmelCase_ :Any = initializer_range lowerCAmelCase_ :Optional[int] = [ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Tuple = version.parse("1.11" ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-4
84
0
'''simple docstring''' from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
151
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def _snake_case ( lowercase__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Union[str, Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Dict = 0.01 with locka.acquire(): with pytest.raises(lowercase__ ): lowerCAmelCase_ :List[Any] = time.time() locka.acquire(lowercase__ ) assert time.time() - _start > timeout def _snake_case ( lowercase__ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = """a""" * 1_0_0_0 + """.lock""" lowerCAmelCase_ :Optional[Any] = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(""".lock""" ) assert not locka._lock_file.endswith(lowercase__ ) assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5 lowerCAmelCase_ :Any = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(lowercase__ ): locka.acquire(0 )
84
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING a_ : List[Any] = logging.get_logger(__name__) a_ : Any = { 'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json', } class _snake_case ( A__ ): _lowercase : List[str] = "instructblip_vision_model" def __init__( self , a=1408 , a=6144 , a=39 , a=16 , a=224 , a=14 , a="gelu" , a=1E-6 , a=0.0 , a=1E-10 , a=True , **a , ) -> Dict: super().__init__(**__A) SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = qkv_bias @classmethod def SCREAMING_SNAKE_CASE__ ( cls , a , **a) -> "PretrainedConfig": cls._set_token_in_kwargs(__A) SCREAMING_SNAKE_CASE = cls.get_config_dict(__A , **__A) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type') == "instructblip": SCREAMING_SNAKE_CASE = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''') return cls.from_dict(__A , **__A) class _snake_case ( A__ ): _lowercase : Optional[int] = "instructblip_qformer" def __init__( self , a=3_0522 , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=0.02 , a=1E-12 , a=0 , a="absolute" , a=2 , a=1408 , **a , ) -> List[str]: super().__init__(pad_token_id=__A , **__A) SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = cross_attention_frequency SCREAMING_SNAKE_CASE = encoder_hidden_size @classmethod def SCREAMING_SNAKE_CASE__ ( cls , a , **a) -> "PretrainedConfig": cls._set_token_in_kwargs(__A) SCREAMING_SNAKE_CASE = cls.get_config_dict(__A , **__A) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type') == "instructblip": SCREAMING_SNAKE_CASE = config_dict["""qformer_config"""] if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''') return cls.from_dict(__A , **__A) class _snake_case ( A__ ): _lowercase : List[str] = "instructblip" _lowercase : Optional[int] = True def __init__( self , a=None , a=None , a=None , a=32 , **a) -> str: super().__init__(**__A) if vision_config is None: SCREAMING_SNAKE_CASE = {} logger.info('vision_config is None. 
initializing the InstructBlipVisionConfig with default values.') if qformer_config is None: SCREAMING_SNAKE_CASE = {} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.') if text_config is None: SCREAMING_SNAKE_CASE = {} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).') SCREAMING_SNAKE_CASE = InstructBlipVisionConfig(**__A) SCREAMING_SNAKE_CASE = InstructBlipQFormerConfig(**__A) SCREAMING_SNAKE_CASE = text_config["""model_type"""] if """model_type""" in text_config else """opt""" SCREAMING_SNAKE_CASE = CONFIG_MAPPING[text_model_type](**__A) SCREAMING_SNAKE_CASE = self.text_config.tie_word_embeddings SCREAMING_SNAKE_CASE = self.text_config.is_encoder_decoder SCREAMING_SNAKE_CASE = num_query_tokens SCREAMING_SNAKE_CASE = self.vision_config.hidden_size SCREAMING_SNAKE_CASE = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES SCREAMING_SNAKE_CASE = 1.0 SCREAMING_SNAKE_CASE = 0.02 @classmethod def SCREAMING_SNAKE_CASE__ ( cls , a , a , a , **a , ) -> Tuple: return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , ) def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__) SCREAMING_SNAKE_CASE = self.vision_config.to_dict() SCREAMING_SNAKE_CASE = self.qformer_config.to_dict() SCREAMING_SNAKE_CASE = self.text_config.to_dict() SCREAMING_SNAKE_CASE = self.__class__.model_type return output
137
"""simple docstring""" from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __UpperCAmelCase = 1.054571817e-34 # unit of ℏ : J * s __UpperCAmelCase = 3e8 # unit of c : m * s^-1 def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> dict[str, float]: '''simple docstring''' if (force, area, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if force < 0: raise ValueError("""Magnitude of force can not be negative""" ) if distance < 0: raise ValueError("""Distance can not be negative""" ) if area < 0: raise ValueError("""Area can not be negative""" ) if force == 0: lowerCAmelCase_ :Union[str, Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_4_0 * (distance) ** 4 ) return {"force": force} elif area == 0: lowerCAmelCase_ :Optional[Any] = (2_4_0 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: lowerCAmelCase_ :Any = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("""One and only one argument must be 0""" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
84
0
from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function lowercase__ : Union[str, Any] = 1.0_54_57_18_17e-34 # unit of ℏ : J * s lowercase__ : List[str] = 3e8 # unit of c : m * s^-1 def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> dict[str, float]: if (force, area, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if force < 0: raise ValueError('''Magnitude of force can not be negative''' ) if distance < 0: raise ValueError('''Distance can not be negative''' ) if area < 0: raise ValueError('''Area can not be negative''' ) if force == 0: lowerCAmelCase = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_4_0 * (distance) ** 4 ) return {"force": force} elif area == 0: lowerCAmelCase = (2_4_0 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: lowerCAmelCase = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError('''One and only one argument must be 0''' ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
338
"""simple docstring""" def _snake_case ( lowercase__ : str , lowercase__ : str ) -> int: '''simple docstring''' if len(lowercase__ ) != len(lowercase__ ): raise ValueError("""String lengths must match!""" ) lowerCAmelCase_ :Optional[int] = 0 for chara, chara in zip(lowercase__ , lowercase__ ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
84
0
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING lowerCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(A__ ) class __A( A__ ): """simple docstring""" def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): super().__init__(*__A , **__A ) requires_backends(self , """vision""" ) self.check_model_type(__A ) def __call__(self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): return super().__call__(__A , **__A ) def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): return {}, {}, {} def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = load_image(__A ) UpperCamelCase__ = image.size UpperCamelCase__ = self.image_processor(images=__A , return_tensors=self.framework ) return model_inputs def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.model(**__A ) return model_outputs def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = model_outputs.predicted_depth UpperCamelCase__ = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=__A ) UpperCamelCase__ = prediction.squeeze().cpu().numpy() UpperCamelCase__ = (output * 2_55 / np.max(__A )).astype("""uint8""" ) UpperCamelCase__ = Image.fromarray(__A ) UpperCamelCase__ = {} UpperCamelCase__ = predicted_depth UpperCamelCase__ = depth return output_dict
244
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A ) -> Optional[Any]: super().__init__() lowerCAmelCase_ :int = nn.ModuleList(__A ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A = None , __A = None , __A = None , __A = None , __A = False , __A = True , ) -> Union[ControlNetOutput, Tuple]: for i, (image, scale, controlnet) in enumerate(zip(__A , __A , self.nets ) ): lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = controlnet( __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) # merge samples if i == 0: lowerCAmelCase_ , lowerCAmelCase_ :Tuple = down_samples, mid_sample else: lowerCAmelCase_ :str = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__A , __A ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __lowerCAmelCase ( self , __A , __A = True , __A = None , __A = False , __A = None , ) -> Optional[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = save_directory for controlnet in self.nets: controlnet.save_pretrained( __A , is_main_process=__A , save_function=__A , safe_serialization=__A , variant=__A , ) idx += 1 lowerCAmelCase_ :Any = model_path_to_save + f"""_{idx}""" @classmethod def __lowerCAmelCase ( cls , __A , **__A ) -> List[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... 
lowerCAmelCase_ :List[Any] = pretrained_model_path while os.path.isdir(__A ): lowerCAmelCase_ :Tuple = ControlNetModel.from_pretrained(__A , **__A ) controlnets.append(__A ) idx += 1 lowerCAmelCase_ :Dict = pretrained_model_path + f"""_{idx}""" logger.info(f"""{len(__A )} controlnets loaded from {pretrained_model_path}.""" ) if len(__A ) == 0: raise ValueError( f"""No ControlNets found under {os.path.dirname(__A )}. Expected at least {pretrained_model_path + "_0"}.""" ) return cls(__A )
84
0
'''simple docstring''' import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class a__( A__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Dict = VideoToVideoSDPipeline UpperCAmelCase_ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''} ) - {"image", "width", "height"} UpperCAmelCase_ : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''} ) - {"image"} UpperCAmelCase_ : int = PipelineTesterMixin.required_optional_params - {"latents"} UpperCAmelCase_ : Union[str, Any] = False # No `output_type`. 
UpperCAmelCase_ : Optional[Any] = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def a_ ( self): """simple docstring""" torch.manual_seed(0) lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , ) lowerCAmelCase = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0) lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) lowerCAmelCase = CLIPTextModel(__A) lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""") lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=0): """simple docstring""" lowerCAmelCase = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(__A)).to(__A) if str(__A).startswith("""mps"""): lowerCAmelCase = torch.manual_seed(__A) else: lowerCAmelCase = torch.Generator(device=__A).manual_seed(__A) lowerCAmelCase = { 
"""prompt""": """A painting of a squirrel eating a burger""", """video""": video, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def a_ ( self): """simple docstring""" lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = VideoToVideoSDPipeline(**__A) lowerCAmelCase = sd_pipe.to(__A) sd_pipe.set_progress_bar_config(disable=__A) lowerCAmelCase = self.get_dummy_inputs(__A) lowerCAmelCase = """np""" lowerCAmelCase = sd_pipe(**__A).frames lowerCAmelCase = frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) lowerCAmelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def a_ ( self): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__A , expected_max_diff=5E-3) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""") def a_ ( self): """simple docstring""" pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""") def a_ ( self): """simple docstring""" pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""") def a_ ( self): """simple docstring""" pass def a_ ( self): """simple docstring""" return super().test_progress_bar() @slow @skip_mps class a__( unittest.TestCase ): '''simple docstring''' def a_ ( self): """simple docstring""" lowerCAmelCase = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa) pipe.enable_model_cpu_offload() # 10 frames lowerCAmelCase = torch.Generator(device="""cpu""").manual_seed(0) 
lowerCAmelCase = torch.randn((1, 10, 3, 1024, 576) , generator=__A) lowerCAmelCase = video.to("""cuda""") lowerCAmelCase = """Spiderman is surfing""" lowerCAmelCase = pipe(__A , video=__A , generator=__A , num_inference_steps=3 , output_type="""pt""").frames lowerCAmelCase = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656]) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1E-2
272
"""simple docstring""" from PIL import Image def _snake_case ( lowercase__ : Image , lowercase__ : float ) -> Image: '''simple docstring''' def brightness(lowercase__ : int ) -> float: return 1_2_8 + level + (c - 1_2_8) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(lowercase__ ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 __UpperCAmelCase = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
84
0
from math import isclose, sqrt def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ): """simple docstring""" snake_case = point_y / 4 / point_x snake_case = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) snake_case = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) snake_case = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaeneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 snake_case = outgoing_gradient**2 + 4 snake_case = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) snake_case = (point_y - outgoing_gradient * point_x) ** 2 - 1_00 snake_case = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) snake_case = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point snake_case = x_minus if isclose(lowercase__ ,lowercase__ ) else x_plus snake_case = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def UpperCAmelCase__ (UpperCamelCase_ = 1.4 ,UpperCamelCase_ = -9.6 ): """simple docstring""" snake_case = 0 snake_case = first_x_coord snake_case = first_y_coord snake_case = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): snake_case = next_point(lowercase__ ,lowercase__ ,lowercase__ ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(f'''{solution() = }''')
127
"""Shared test helpers for DeepFloyd IF pipelines: tiny dummy components and save/load checks."""
import tempfile

import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import to_np


class IFPipelineTesterMixin:
    """Mixin used by concrete IF pipeline tests; expects the test class to
    provide ``pipeline_class`` and ``get_dummy_inputs``."""

    def get_dummy_components(self):
        """Build tiny, deterministically-seeded components for a base IF pipeline."""
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def get_superresolution_dummy_components(self):
        """Build tiny, deterministically-seeded components for an IF super-resolution pipeline."""
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        """Optional components set to ``None`` must survive a save/load round-trip,
        and outputs must stay numerically equal."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # Some pipelines (img2img / inpaint / superres) take extra image inputs.
        image = inputs["image"] if "image" in inputs else None
        mask_image = inputs["mask_image"] if "mask_image" in inputs else None
        original_image = inputs["original_image"] if "original_image" in inputs else None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        """Outputs before and after a local save/load round-trip must match."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
84
0
"""`transformers-cli train`: fine-tune a pipeline on a single-sentence classification task."""
import os
from argparse import ArgumentParser, Namespace

from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand


if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory used by argparse (via ``set_defaults(func=...)``) to build the command."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the ``train`` subcommand and all of its CLI arguments."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")

        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        # Prefer TF when both backends are available (only the TF path is implemented).
        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )

        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        """Dispatch to the framework-specific training loop."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        """Train with the TF pipeline ``fit`` API and save the trained pipeline."""
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
57
"""Slow Flax integration tests for the Stable Diffusion 2 pipeline."""
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        """Run the bf16 SD2 pipeline end-to-end and check a known output slice."""
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        """Same pipeline but with a DPM-Solver++ multistep scheduler swapped in."""
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
84
0
"""Tests for the TensorFlow ViTMAE model (TFViTMAEModel / TFViTMAEForPreTraining)."""
from __future__ import annotations

import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module

import numpy as np

from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTMAEForPreTraining, TFViTMAEModel

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class TFViTMAEModelTester:
    """Builds tiny configs/inputs and runs shape checks for the TF ViTMAE models."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the random mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)

            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(inputs, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(inputs, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(inputs, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(inputs)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(inputs, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across runs
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
199
"""Project Euler problem 123 (https://projecteuler.net/problem=123).

For odd n, the remainder of (p_n - 1)^n + (p_n + 1)^n divided by p_n^2 equals
2 * n * p_n; for even n it is 2.  Find the least n whose remainder exceeds the limit.
"""
from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Yield prime numbers indefinitely using an incremental sieve.

    A map from upcoming composite -> one of its prime factors is maintained,
    so memory grows only with the number of primes yielded.

    >>> s = sieve()
    >>> [next(s) for _ in range(5)]
    [2, 3, 5, 7, 11]
    """
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: slide this factor to its next unclaimed multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime: the first composite it must mark is its square.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the least odd index n such that 2 * p_n * n exceeds ``limit``.

    >>> solution(10)
    3
    >>> solution(100)
    5
    """
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
84
0
def encrypt(input_string: str, key: int) -> str:
    """Encrypt ``input_string`` with the rail fence (zigzag) cipher.

    ``key`` is the number of rails; the plaintext is written in a zigzag
    over the rails, then read off rail by rail.

    Raises:
        ValueError: if ``key`` is zero or negative.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Invert :func:`encrypt`: rebuild the zigzag and read it in order.

    Raises:
        ValueError: if ``key`` is zero or negative.
    """
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    # First pass: mark how many characters land on each rail.
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    # Second pass: slice the ciphertext into the rails.
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    # Third pass: walk the zigzag again, consuming one char per visit.
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Decrypt ``input_string`` with every possible key; returns {key: plaintext}."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
68
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): # TODO: is there an appropriate internal test set? UpperCAmelCase_ :List[Any] = "ssube/stable-diffusion-x4-upscaler-onnx" def __lowerCAmelCase ( self , __A=0 ) -> Optional[int]: lowerCAmelCase_ :Optional[Any] = floats_tensor((1, 3, 128, 128) , rng=random.Random(__A ) ) lowerCAmelCase_ :List[Any] = torch.manual_seed(__A ) lowerCAmelCase_ :Tuple = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Dict = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :int = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Tuple = 
OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :int = self.get_dummy_inputs() lowerCAmelCase_ :List[str] = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :str = np.array( [0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Union[str, Any] = pipe(**__A ).images lowerCAmelCase_ :Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Tuple = np.array( [0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Union[str, Any] = self.get_dummy_inputs() lowerCAmelCase_ :Optional[Any] = pipe(**__A ).images lowerCAmelCase_ :Any = image[0, -3:, -3:, -1] assert 
image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Tuple = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Dict = pipe(**__A ).images lowerCAmelCase_ :Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Dict = np.array( [0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @property def __lowerCAmelCase ( self ) -> List[Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Optional[int] = ort.SessionOptions() lowerCAmelCase_ :Dict = False return options def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) lowerCAmelCase_ :Optional[Any] = init_image.resize((128, 128) ) # using the PNDM scheduler by default lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , 
sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Union[str, Any] = """A fantasy landscape, trending on artstation""" lowerCAmelCase_ :List[Any] = torch.manual_seed(0 ) lowerCAmelCase_ :str = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=10 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :Dict = output.images lowerCAmelCase_ :List[str] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase_ :Optional[Any] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) lowerCAmelCase_ :List[str] = init_image.resize((128, 128) ) lowerCAmelCase_ :Any = LMSDiscreteScheduler.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" ) lowerCAmelCase_ :Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = """A fantasy landscape, trending on artstation""" lowerCAmelCase_ :Optional[Any] = torch.manual_seed(0 ) lowerCAmelCase_ :List[str] = pipe( prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=20 , generator=__A , output_type="""np""" , ) lowerCAmelCase_ :int = output.images lowerCAmelCase_ :List[Any] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase_ :Union[str, Any] = np.array( [0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 
0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
84
0
"""Preprocess a raw text dump into tokenized pickled data for distillation.

Reads one example per line, wraps each in the tokenizer's BOS/SEP tokens,
encodes it to ids and dumps the shuffled id lists as a pickle file.
"""
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    """Parse CLI args, tokenize the input file and pickle the id arrays."""
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    count = 0
    interval = 10000
    start = time.time()
    for text in data:
        # Special tokens are added manually here, hence add_special_tokens=False.
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        count += 1
        if count % interval == 0:
            end = time.time()
            logger.info(f"{count} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # uint16 halves the file size whenever the vocabulary fits in 16 bits.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
345
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=__A , ) assert hasattr(self , """env""" ) def __lowerCAmelCase ( self , __A ) -> Any: # configuration for running training on smdistributed Model Parallel lowerCAmelCase_ :Union[str, Any] = { """enabled""": True, """processes_per_host""": 8, } lowerCAmelCase_ :Tuple = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCAmelCase_ :Any = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCAmelCase_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator 
return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=__A , instance_type=self.instance_type , debugger_hook_config=__A , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=__A , py_version="""py36""" , ) def __lowerCAmelCase ( self , __A ) -> List[Any]: TrainingJobAnalytics(__A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def __lowerCAmelCase ( self , __A ) -> List[str]: # create estimator lowerCAmelCase_ :Any = self.create_estimator(__A ) # run training estimator.fit() # result dataframe lowerCAmelCase_ :Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCAmelCase_ :List[str] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCAmelCase_ :Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCAmelCase_ :Optional[int] = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __A )
84
0
'''simple docstring''' from __future__ import annotations def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : list[str] | None = None ) -> list[list[str]]: _lowerCAmelCase : List[Any] = word_bank or [] # create a table _lowerCAmelCase : int = len(lowercase__ ) + 1 _lowerCAmelCase : list[list[list[str]]] = [] for _ in range(lowercase__ ): table.append([] ) # seed value _lowerCAmelCase : Optional[Any] = [[]] # because empty string has empty combination # iterate through the indices for i in range(lowercase__ ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(lowercase__ )] == word: _lowerCAmelCase : list[list[str]] = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(lowercase__ )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(lowercase__ )]: combination.reverse() return table[len(lowercase__ )] if __name__ == "__main__": print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""])) print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""])) print( all_construct( """hexagonosaurus""", ["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""], ) )
309
"""simple docstring""" def _snake_case ( lowercase__ : int = 1_0 ) -> str: '''simple docstring''' if not isinstance(lowercase__ , lowercase__ ) or n < 0: raise ValueError("""Invalid input""" ) lowerCAmelCase_ :List[str] = 1_0**n lowerCAmelCase_ :int = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , lowercase__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"""{solution(10) = }""")
84
0
"""Knuth-Morris-Pratt substring search."""
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Return True if ``pattern`` occurs in ``text`` (O(len(text)) scan)."""
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Return the KMP failure function: failure[k] is the length of the
    longest proper prefix of pattern[:k+1] that is also its suffix."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
151
"""simple docstring""" import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __UpperCAmelCase = 'src/transformers' __UpperCAmelCase = 'docs/source/en/tasks' def _snake_case ( lowercase__ : str , lowercase__ : List[str] , lowercase__ : Any ) -> str: '''simple docstring''' with open(lowercase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCAmelCase_ :List[Any] = f.readlines() # Find the start prompt. lowerCAmelCase_ :Tuple = 0 while not lines[start_index].startswith(lowercase__ ): start_index += 1 start_index += 1 lowerCAmelCase_ :Dict = start_index while not lines[end_index].startswith(lowercase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. 
__UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH) __UpperCAmelCase = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in 
`CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). __UpperCAmelCase = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def _snake_case ( lowercase__ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = TASK_GUIDE_TO_MODELS[task_guide] lowerCAmelCase_ :List[Any] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(lowercase__ , set() ) lowerCAmelCase_ :Union[str, Any] = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n" def _snake_case ( lowercase__ : int , lowercase__ : str=False ) -> Dict: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = _find_text_in_file( filename=os.path.join(lowercase__ , lowercase__ ) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , ) lowerCAmelCase_ :int = get_model_list_for_task(lowercase__ ) if current_list != new_list: if overwrite: with open(os.path.join(lowercase__ , lowercase__ ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" """ to fix this.""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __UpperCAmelCase = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
84
0
"""Sandboxed execution helpers for evaluating generated code (code_eval).

WARNING: ``reliability_guard`` only makes destructive accidents less likely
inside the sacrificial subprocess -- it is NOT a security sandbox.
"""
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


def check_correctness(check_program, timeout, task_id, completion_id):
    """Run ``check_program`` in a restricted subprocess and report the outcome.

    Returns a dict with the task id, whether execution "passed", the raw
    result string, and the completion id.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    """Exec ``check_program`` under IO/time restrictions; append outcome to ``result``."""
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    """Raise :class:`TimeoutException` if the with-block outlives ``seconds``."""

    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        # Always cancel the timer so it cannot fire after the block exits.
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    """Silence stdout/stderr and make stdin unreadable inside the with-block."""
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    """Run the with-block inside a throwaway temporary working directory."""
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    """Raised when the timed program exceeds its time limit."""


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns False so attached pipes are never read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    """Temporarily chdir into ``root``, restoring the old cwd afterwards."""
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    """Disable destructive builtins/os functions before exec'ing untrusted code.

    Optionally caps address-space/data (and, outside macOS, stack) memory.
    Only meaningful inside the sacrificial subprocess: it permanently cripples
    the interpreter it runs in.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    # NOTE(review): works because this module is imported (then __builtins__
    # is a dict); as __main__ it would be the builtins module instead.
    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
137
"""simple docstring""" def _snake_case ( lowercase__ : list[int] ) -> list[list[int]]: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = [] if len(lowercase__ ) == 1: return [nums.copy()] for _ in range(len(lowercase__ ) ): lowerCAmelCase_ :Optional[Any] = nums.pop(0 ) lowerCAmelCase_ :str = permute(lowercase__ ) for perm in permutations: perm.append(lowercase__ ) result.extend(lowercase__ ) nums.append(lowercase__ ) return result def _snake_case ( lowercase__ : Tuple ) -> List[str]: '''simple docstring''' def backtrack(lowercase__ : str ): if start == len(lowercase__ ) - 1: output.append(nums[:] ) else: for i in range(lowercase__ , len(lowercase__ ) ): lowerCAmelCase_ , lowerCAmelCase_ :str = nums[i], nums[start] backtrack(start + 1 ) lowerCAmelCase_ , lowerCAmelCase_ :str = nums[i], nums[start] # backtrack lowerCAmelCase_ :int = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function __UpperCAmelCase = permutea([1, 2, 3]) print(res) doctest.testmod()
84
0
def search(list_data: list, key, left: int = 0, right: int = 0) -> int:
    """Recursive two-ended linear search.

    Checks ``list_data[left]`` and ``list_data[right]`` on each call, then
    narrows both ends by one.  ``right == 0`` is treated as "use the last
    index" (a quirk of the original API: ``right or len(list_data) - 1``),
    which also makes an empty list return -1 safely.

    Returns the index of ``key`` or -1 when absent.

    NOTE(review): the original obfuscated body recursed into an undefined
    name `search` and read undefined parameters; reconstructed here.
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    if list_data[left] == key:
        return left
    if list_data[right] == key:
        return right
    return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
338
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Any = BioGptTokenizer UpperCAmelCase_ :str = False def __lowerCAmelCase ( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ :Optional[Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] lowerCAmelCase_ :str = dict(zip(__A , range(len(__A ) ) ) ) lowerCAmelCase_ :int = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" ) as fp: fp.write(json.dumps(__A ) ) with open(self.merges_file , """w""" ) as fp: fp.write("""\n""".join(__A ) ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: lowerCAmelCase_ :List[Any] = """lower newer""" lowerCAmelCase_ :Tuple = """lower newer""" return input_text, output_text def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :List[str] = BioGptTokenizer(self.vocab_file , self.merges_file ) lowerCAmelCase_ :Union[str, Any] = """lower""" lowerCAmelCase_ :Any = ["""low""", """er</w>"""] lowerCAmelCase_ :Union[str, Any] = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Dict = tokens + ["""<unk>"""] lowerCAmelCase_ :List[str] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) @slow def __lowerCAmelCase ( self ) -> List[Any]: 
lowerCAmelCase_ :Optional[Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) lowerCAmelCase_ :List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__A ) lowerCAmelCase_ :List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A ) lowerCAmelCase_ :Optional[int] = tokenizer.build_inputs_with_special_tokens(__A ) lowerCAmelCase_ :List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
84
0
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent lowerCamelCase_ = {'''UserAgent''': UserAgent().random} def __magic_name__ ( __a : List[Any] ): '''simple docstring''' UpperCamelCase__ = script.contents[0] UpperCamelCase__ = json.loads(data[data.find("""{\"config\"""" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = F"https://www.instagram.com/{username}/" UpperCamelCase__ = self.get_json() def UpperCAmelCase_ (self ): UpperCamelCase__ = requests.get(self.url , headers=__A ).text UpperCamelCase__ = BeautifulSoup(__A , """html.parser""" ).find_all("""script""" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__(self ): return F"{self.__class__.__name__}('{self.username}')" def __str__(self ): return F"{self.fullname} ({self.username}) is {self.biography}" @property def UpperCAmelCase_ (self ): return self.user_data["username"] @property def UpperCAmelCase_ (self ): return self.user_data["full_name"] @property def UpperCAmelCase_ (self ): return self.user_data["biography"] @property def UpperCAmelCase_ (self ): return self.user_data["business_email"] @property def UpperCAmelCase_ (self ): return self.user_data["external_url"] @property def UpperCAmelCase_ (self ): return self.user_data["edge_followed_by"]["count"] @property def UpperCAmelCase_ (self ): return self.user_data["edge_follow"]["count"] @property def UpperCAmelCase_ (self ): return self.user_data["edge_owner_to_timeline_media"]["count"] @property def UpperCAmelCase_ (self ): return self.user_data["profile_pic_url_hd"] @property def UpperCAmelCase_ (self ): return self.user_data["is_verified"] @property def UpperCAmelCase_ (self ): return self.user_data["is_private"] def __magic_name__ ( __a : str = "github" ): 
'''simple docstring''' import os if os.environ.get("""CI""" ): return # test failing on GitHub Actions UpperCamelCase__ = InstagramUser(lowercase__ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , lowercase__ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 150 assert instagram_user.number_of_followers > 120_000 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("""https://instagram.""" ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase_ = InstagramUser('''github''') print(instagram_user) print(f'{instagram_user.number_of_posts = }') print(f'{instagram_user.number_of_followers = }') print(f'{instagram_user.number_of_followings = }') print(f'{instagram_user.email = }') print(f'{instagram_user.website = }') print(f'{instagram_user.profile_picture_url = }') print(f'{instagram_user.is_verified = }') print(f'{instagram_user.is_private = }')
244
"""simple docstring""" from ...configuration_utils import PretrainedConfig class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "bert-generation" def __init__( self , __A=5_0358 , __A=1024 , __A=24 , __A=16 , __A=4096 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.0_2 , __A=1E-12 , __A=0 , __A=2 , __A=1 , __A="absolute" , __A=True , **__A , ) -> Tuple: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ :Any = vocab_size lowerCAmelCase_ :List[Any] = hidden_size lowerCAmelCase_ :Optional[int] = num_hidden_layers lowerCAmelCase_ :int = num_attention_heads lowerCAmelCase_ :List[Any] = hidden_act lowerCAmelCase_ :Optional[Any] = intermediate_size lowerCAmelCase_ :List[Any] = hidden_dropout_prob lowerCAmelCase_ :int = attention_probs_dropout_prob lowerCAmelCase_ :Tuple = max_position_embeddings lowerCAmelCase_ :List[str] = initializer_range lowerCAmelCase_ :Union[str, Any] = layer_norm_eps lowerCAmelCase_ :List[str] = position_embedding_type lowerCAmelCase_ :Optional[int] = use_cache
84
0
"""Lazy import structure for the UperNet model family.

NOTE(review): in the obfuscated original both the import-structure dict and
the torch-only model list were assigned to the same name `__lowercase` (the
second assignment shadowed the first), `_LazyModule` then referenced an
undefined `_import_structure`, and the lazy module was never installed into
`sys.modules`.  Reconstructed to the standard transformers init pattern.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

# The modeling module is only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
272
"""simple docstring""" def _snake_case ( lowercase__ : List[Any] , lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : Any ) -> int: '''simple docstring''' lowerCAmelCase_ :int = [False] * len(lowercase__ ) lowerCAmelCase_ :str = [] queue.append(lowercase__ ) lowerCAmelCase_ :Any = True while queue: lowerCAmelCase_ :Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase__ ) lowerCAmelCase_ :Union[str, Any] = True lowerCAmelCase_ :int = u return visited[t] def _snake_case ( lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : str ) -> Dict: '''simple docstring''' lowerCAmelCase_ :List[Any] = [-1] * (len(lowercase__ )) lowerCAmelCase_ :str = 0 while bfs(lowercase__ , lowercase__ , lowercase__ , lowercase__ ): lowerCAmelCase_ :List[str] = float("""Inf""" ) lowerCAmelCase_ :List[str] = sink while s != source: # Find the minimum value in select path lowerCAmelCase_ :Any = min(lowercase__ , graph[parent[s]][s] ) lowerCAmelCase_ :Union[str, Any] = parent[s] max_flow += path_flow lowerCAmelCase_ :Tuple = sink while v != source: lowerCAmelCase_ :List[str] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowerCAmelCase_ :Union[str, Any] = parent[v] return max_flow __UpperCAmelCase = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __UpperCAmelCase , __UpperCAmelCase = 0, 5 print(ford_fulkerson(graph, source, sink))
84
0
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

# Map from ONNX tensor element-type strings to numpy dtypes.
# NOTE(review): the original dtype names were mangled (np.inta, np.uintaa, ...)
# and the dict shadowed the `logger` binding; restored to the upstream table.
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    """Thin wrapper around an `onnxruntime.InferenceSession` with
    save/load helpers mirroring the diffusers `from_pretrained` API.

    NOTE(review): in the obfuscated original every method was named `a_` (later
    defs shadowed earlier ones) and defaults referenced an undefined `__A`;
    method and parameter names are reconstructed.
    """

    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        """Run the session; keyword args are the named graph inputs."""
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Create an `ort.InferenceSession`, defaulting to the CPU provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        """Copy the latest model file (and any external weights) into *save_directory*."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        """Public save entry point; refuses to write over an existing file."""
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[str] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options=None,
        **kwargs,
    ):
        """Load from a local directory or download the file from the Hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        """Resolve an optional `repo@revision` spec and delegate to `_from_pretrained`."""
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
127
"""simple docstring""" import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = 1_0 lowerCAmelCase_ :Optional[int] = datasets.Features( { """tokens""": datasets.Sequence(datasets.Value("""string""" ) ), """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ), """answers""": datasets.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), """id""": datasets.Value("""int64""" ), } ) lowerCAmelCase_ :int = datasets.Dataset.from_dict( { """tokens""": [["""foo"""] * 5] * n, """labels""": [[1] * 5] * n, """answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0, """id""": list(range(lowercase__ ) ), } , features=lowercase__ , ) return dataset @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : int ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return filename # FILE_CONTENT + files __UpperCAmelCase = '\\n Text data.\n Second line of data.' 
@pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> str: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt""" lowerCAmelCase_ :List[Any] = FILE_CONTENT with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> Tuple: '''simple docstring''' import bza lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2""" lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] ) -> Dict: '''simple docstring''' import gzip lowerCAmelCase_ :int = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" ) lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with gzip.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' if datasets.config.LZ4_AVAILABLE: import lza.frame lowerCAmelCase_ :List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4""" lowerCAmelCase_ :int = bytes(lowercase__ , """utf-8""" ) with lza.frame.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : Optional[int] ) -> Any: '''simple docstring''' if datasets.config.PY7ZR_AVAILABLE: import pyazr lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z""" with pyazr.SevenZipFile(lowercase__ , """w""" ) as archive: archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' 
import tarfile lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> str: '''simple docstring''' import lzma lowerCAmelCase_ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz""" lowerCAmelCase_ :Optional[Any] = bytes(lowercase__ , """utf-8""" ) with lzma.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import zipfile lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Tuple: '''simple docstring''' if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst""" lowerCAmelCase_ :Any = bytes(lowercase__ , """utf-8""" ) with zstd.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """file.xml""" lowerCAmelCase_ :Any = textwrap.dedent( """\ <?xml version=\"1.0\" encoding=\"UTF-8\" ?> <tmx version=\"1.4\"> <header segtype=\"sentence\" srclang=\"ca\" /> <body> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv> <tuv 
xml:lang=\"en\"><seg>Content 3</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv> </tu> </body> </tmx>""" ) with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename __UpperCAmelCase = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __UpperCAmelCase = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __UpperCAmelCase = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __UpperCAmelCase = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __UpperCAmelCase = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Union[str, Any]: '''simple docstring''' return DATA_DICT_OF_LISTS @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Any: '''simple docstring''' lowerCAmelCase_ :Tuple = datasets.Dataset.from_dict(lowercase__ ) lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" ) with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con: lowerCAmelCase_ :Union[str, Any] = con.cursor() cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" ) for item in DATA: cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, 
?)""" , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> int: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Optional[int] = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Any: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Dict = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Dict ) -> Union[str, Any]: '''simple docstring''' import bza lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2""" with open(lowercase__ , """rb""" ) as f: lowerCAmelCase_ :Union[str, Any] = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> Optional[Any]: '''simple 
docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) ) f.write(lowercase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : str ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" ) lowerCAmelCase_ :Optional[Any] = pa.schema( { """col_1""": pa.string(), """col_2""": pa.intaa(), """col_3""": pa.floataa(), } ) with open(lowercase__ , """wb""" ) as f: lowerCAmelCase_ :Optional[int] = pq.ParquetWriter(lowercase__ , schema=lowercase__ ) lowerCAmelCase_ :List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ ) writer.write_table(lowercase__ ) writer.close() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Union[str, Any] = {"""data""": DATA} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> List[Any]: '''simple docstring''' 
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Optional[Any] = {"""data""": DATA_DICT_OF_LISTS} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_312: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_STR: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int , lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path 
@pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : List[Any] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[str] ) -> int: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : List[str] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Any = 
tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : str , lowercase__ : List[str] , lowercase__ : int ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Dict = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc""" with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip""" with 
zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : List[str] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename("""unsupported.ext""" ) ) f.write(lowercase__ , arcname=os.path.basename("""unsupported_2.ext""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] ) lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" ) with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> int: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" ) @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Tuple: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" ) @pytest.fixture(scope="""session""" ) def 
_snake_case ( lowercase__ : Any , lowercase__ : Tuple ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace(""".jpg""" , """2.jpg""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data_dir""" ) (data_dir / "subdir").mkdir() with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden file with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) return data_dir
84
0
"""simple docstring""" from __future__ import annotations def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , ): '''simple docstring''' __lowerCAmelCase = cipher_alphabet or [chr(lowercase__ ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) __lowerCAmelCase = { """a""": 0.0_84_97, """b""": 0.0_14_92, """c""": 0.0_22_02, """d""": 0.0_42_53, """e""": 0.1_11_62, """f""": 0.0_22_28, """g""": 0.0_20_15, """h""": 0.0_60_94, """i""": 0.0_75_46, """j""": 0.0_01_53, """k""": 0.0_12_92, """l""": 0.0_40_25, """m""": 0.0_24_06, """n""": 0.0_67_49, """o""": 0.0_75_07, """p""": 0.0_19_29, """q""": 0.0_00_95, """r""": 0.0_75_87, """s""": 0.0_63_27, """t""": 0.0_93_56, """u""": 0.0_27_58, """v""": 0.0_09_78, """w""": 0.0_25_60, """x""": 0.0_01_50, """y""": 0.0_19_94, """z""": 0.0_00_77, } else: # Custom frequencies dictionary __lowerCAmelCase = frequencies_dict if not case_sensitive: __lowerCAmelCase = ciphertext.lower() # Chi squared statistic values __lowerCAmelCase = {} # cycle through all of the shifts for shift in range(len(lowercase__ ) ): __lowerCAmelCase = """""" # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet __lowerCAmelCase = (alphabet_letters.index(letter.lower() ) - shift) % len( lowercase__ ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter __lowerCAmelCase = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: __lowerCAmelCase = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message __lowerCAmelCase = 
decrypted_with_shift.lower().count(lowercase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies __lowerCAmelCase = frequencies[letter] * occurrences # Complete the chi squared statistic formula __lowerCAmelCase = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message __lowerCAmelCase = decrypted_with_shift.count(lowercase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies __lowerCAmelCase = frequencies[letter] * occurrences # Complete the chi squared statistic formula __lowerCAmelCase = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary __lowerCAmelCase = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(_UpperCamelCase ) -> tuple[float, str]: return chi_squared_statistic_values[key] __lowerCAmelCase = min( lowercase__ , key=lowercase__ , ) # Get all the data from the most likely cipher (key, decoded message) ( __lowerCAmelCase ) = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
57
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json', } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Optional[Any] = "data2vec-text" def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.0_2 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> Tuple: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ :Dict = vocab_size lowerCAmelCase_ :Dict = hidden_size lowerCAmelCase_ :int = num_hidden_layers lowerCAmelCase_ :List[Any] = num_attention_heads lowerCAmelCase_ :Any = hidden_act lowerCAmelCase_ :Optional[int] = intermediate_size lowerCAmelCase_ :str = hidden_dropout_prob lowerCAmelCase_ :Any = attention_probs_dropout_prob lowerCAmelCase_ :str = max_position_embeddings lowerCAmelCase_ :int = type_vocab_size lowerCAmelCase_ :Tuple = initializer_range lowerCAmelCase_ :List[Any] = layer_norm_eps lowerCAmelCase_ :List[Any] = position_embedding_type lowerCAmelCase_ :List[Any] = use_cache lowerCAmelCase_ :List[Any] = classifier_dropout class _SCREAMING_SNAKE_CASE ( A__ ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowerCAmelCase_ :List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowerCAmelCase_ :List[str] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
84
0
from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class A(BaseImageProcessor):
    # NOTE(review): the class name "A" looks machine-mangled; it is kept
    # unchanged to avoid breaking external references.
    """Image processor running a resize -> center-crop -> rescale -> normalize
    pipeline with ImageNet default mean/std.

    Fixes applied: all five methods were previously named identically (so only
    the last definition survived), every parameter was named ``lowercase_``
    (a SyntaxError), the base class referenced an undefined name instead of
    the imported ``BaseImageProcessor``, and the ``model_input_names`` class
    attribute had a mangled name the base class could not see.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        """Store the default preprocessing configuration."""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize *image* to ``size`` ({"height","width"} or {"shortest_edge"})."""
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            # Scale the short side by 256/224 before the eventual 224 crop.
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(
                image, size=shortest_edge, default_to_square=False
            )
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image,
            size=(size_dict["height"], size_dict["width"]),
            resample=resample,
            data_format=data_format,
            **kwargs,
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop *image* to ``size["height"] x size["width"]``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(
            image, size=(size["height"], size["width"]), data_format=data_format, **kwargs
        )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize *image* channel-wise with *mean* and *std*."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the full pipeline on one image or a batch of images.

        Per-call arguments override the defaults stored on the instance.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
199
"""Convert a native T5X (JAX) checkpoint into a PyTorch T5 checkpoint.

Fixes applied: every function here was previously defined under the same
name (``_snake_case``) while the call sites used the real names, so all
internal calls raised NameError; parameters were duplicated mangled names
(a SyntaxError); the state-dict target keys (reconstructed below from the
canonical HF T5 layout — verify against a known-good conversion) had been
stripped; and the argparse result was read via a nonexistent attribute.
"""

import argparse
import collections

import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return (k, o, q, v) kernels of attention block ``{prefix}/layers_{i}``."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return (wi, wo) MLP kernels; wi is a (wi_0, wi_1) pair for gated MLPs."""
    if split_mlp_wi:
        wi_a = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_b = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_a, wi_b)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer-norm scale of ``{prefix}/layers_{i}/{layer_name}``."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]


def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Map the flat T5X parameter dict onto HF T5 state-dict keys.

    Kernels are transposed (.T) because T5X stores (in, out) while PyTorch
    ``nn.Linear`` stores (out, in).
    """
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Turn numpy arrays into torch tensors and fill in tied weights."""
    state_dict = collections.OrderedDict(
        [(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()]
    )

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:
            # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Load a T5X checkpoint file into *model* in place."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False
):
    """End-to-end conversion: build the model from config, load, and save."""
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config)
    else:
        model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Converts a native T5X checkpoint into a PyTorch checkpoint."
    )
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    args = parser.parse_args()
    # Fix: argparse exposes "--t5x_checkpoint_path" as args.t5x_checkpoint_path;
    # the original read a nonexistent attribute.
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
84
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    """Configuration class for a data2vec-text model.

    Defaults reproduce a BERT-base-like architecture. All arguments are
    stored verbatim on the instance; token ids are forwarded to
    ``PretrainedConfig``.
    """

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # BUGFIX: every parameter was obfuscated to the same name "lowercase"
        # (a SyntaxError) and the body read undefined names; restored to the
        # canonical parameter names the assignments below expect.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    # NOTE(review): the second class previously reused the first class's name,
    # clobbering the config class at import time; renamed to a distinct name.
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Declare the dynamic axes of the ONNX export inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
68
"""Convert original GLPN checkpoints (.pth) into HuggingFace GLPN checkpoints."""
import argparse
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    """Map original GLPN parameter names onto the HuggingFace GLPN naming scheme.

    NOTE(review): the obfuscated version bound every replace() result to a
    throwaway variable and never stored into the output dict; restored to the
    sequential ``key = key.replace(...)`` / ``new_state_dict[key] = value`` form.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    """Split the fused key/value projection of each encoder block in place."""
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values
            # (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    """Download the standard COCO test image used for the sanity check."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Convert an original GLPN checkpoint, verify its output, optionally push to the hub."""
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
84
0
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """SafeLoader variant that rejects mappings containing duplicate keys."""

    def _check_no_duplicates_on_constructed_node(self, node):
        # NOTE(review): this class/method pair was obfuscated away from the names
        # used by its own call sites; names restored so the references resolve.
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable; normalize them to tuples before counting.
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into (yaml front-matter, remaining body).

    Returns (None, full text) when no ``---`` front-matter block is present.
    """
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes with dashes in their YAML spelling
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load metadata from a README.md's yaml front-matter (empty if absent)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        """Write this metadata back into the README at ``path``, preserving its body."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Parse a yaml string, rejecting duplicate keys."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


UpperCamelCase_ = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}


if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
345
"""Lazy import structure for the RoCBert model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Always-importable pieces: configuration + slow tokenizer.
# NOTE(review): this dict must be named _import_structure — the _LazyModule call
# below references it by that name.
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # No fast tokenizer is shipped for RoCBert; placeholder kept for symmetry
    # with the other model __init__ files.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]


if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # BUGFIX: the previous code raised OptionalDependencyNotAvailable here,
        # i.e. exactly when the dependency WAS available; placeholder instead.
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    # Register the lazy module under this module's own name so attribute
    # access triggers the deferred imports (standard transformers pattern).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
84
0
'''simple docstring'''
# NOTE(review): this chunk is machine-obfuscated test code: every assignment
# target was renamed to `_lowerCAmelCase`, function parameters share duplicate
# names, and many call sites pass placeholder names (`__A`, `lowercase__`) that
# are never defined in scope. It is preserved verbatim below; restoring working
# behavior requires the upstream original and is not attempted here.
import random
import unittest

import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax


if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit
    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    UpperCamelCase_ = """0.12"""  # assumed parallelism: 8

if is_torch_available():
    import torch


# presumably ids_tensor(shape, vocab_size, rng=None) — parameter names were lost
# in obfuscation; the body reads `rng`, `shape`, `vocab_size` which are undefined.
def _UpperCAmelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any]=None ) -> str:
    if rng is None:
        _lowerCAmelCase : Tuple = random.Random()

    _lowerCAmelCase : Tuple = 1
    for dim in shape:
        total_dims *= dim

    _lowerCAmelCase : Union[str, Any] = []
    for _ in range(lowercase__ ):
        values.append(rng.randint(0 , vocab_size - 1 ) )

    _lowerCAmelCase : Tuple = np.array(lowercase__ , dtype=jnp.intaa ).reshape(lowercase__ )

    return output


# presumably random_attention_mask(shape, rng=None)
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : Any=None ) -> int:
    _lowerCAmelCase : Dict = ids_tensor(lowercase__ , vocab_size=2 , rng=lowercase__ )
    # make sure that at least one token is attended to for each batch
    _lowerCAmelCase : str = 1
    return attn_mask


@require_flax
class a_ :
    # generation tester mixin; all methods below were uniformly renamed to
    # `__UpperCamelCase`, so later definitions shadow earlier ones at runtime.
    __lowerCAmelCase : Any = None
    __lowerCAmelCase : List[str] = ()

    def __UpperCamelCase ( self ):
        _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        _lowerCAmelCase : int = 2
        _lowerCAmelCase : Optional[int] = inputs["""input_ids"""].shape[-1] // 2
        _lowerCAmelCase : List[str] = inputs["""input_ids"""][:max_batch_size, :sequence_length]
        _lowerCAmelCase : List[str] = jnp.ones_like(__A )
        _lowerCAmelCase : Optional[Any] = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        _lowerCAmelCase : Optional[Any] = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            _lowerCAmelCase : List[str] = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def __UpperCamelCase ( self ):
        _lowerCAmelCase : List[Any] = self._get_input_ids_and_config()
        _lowerCAmelCase : int = False
        _lowerCAmelCase : List[Any] = max_length
        _lowerCAmelCase : Any = 0
        for model_class in self.all_generative_model_classes:
            _lowerCAmelCase : Dict = model_class(__A )
            _lowerCAmelCase : List[str] = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            _lowerCAmelCase : Optional[Any] = getattr(__A , __A )
            _lowerCAmelCase : Tuple = pt_model_class(__A ).eval()
            _lowerCAmelCase : int = load_flax_weights_in_pytorch_model(__A , flax_model.params )

            _lowerCAmelCase : List[str] = flax_model.generate(__A ).sequences
            _lowerCAmelCase : Optional[Any] = pt_model.generate(torch.tensor(__A , dtype=torch.long ) )

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                _lowerCAmelCase : List[str] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )

    def __UpperCamelCase ( self ):
        # greedy generation (do_sample=False) + jit equivalence
        _lowerCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
        _lowerCAmelCase : Optional[int] = False
        _lowerCAmelCase : Dict = max_length

        for model_class in self.all_generative_model_classes:
            _lowerCAmelCase : str = model_class(__A )
            _lowerCAmelCase : Optional[int] = model.generate(__A ).sequences

            self.assertEqual(generation_outputs.shape[-1] , __A )

            _lowerCAmelCase : Optional[Any] = jit(model.generate )
            _lowerCAmelCase : Optional[int] = jit_generate(__A ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def __UpperCamelCase ( self ):
        # sampling (do_sample=True) + jit equivalence
        _lowerCAmelCase : List[str] = self._get_input_ids_and_config()
        _lowerCAmelCase : List[Any] = True
        _lowerCAmelCase : Optional[int] = max_length

        for model_class in self.all_generative_model_classes:
            _lowerCAmelCase : str = model_class(__A )
            _lowerCAmelCase : Optional[int] = model.generate(__A ).sequences

            self.assertEqual(generation_outputs.shape[-1] , __A )

            _lowerCAmelCase : Any = jit(model.generate )
            _lowerCAmelCase : List[str] = jit_generate(__A ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def __UpperCamelCase ( self ):
        # beam search (num_beams=2) + jit equivalence
        _lowerCAmelCase : List[str] = self._get_input_ids_and_config()
        _lowerCAmelCase : Dict = False
        _lowerCAmelCase : Any = max_length
        _lowerCAmelCase : Tuple = 2

        for model_class in self.all_generative_model_classes:
            _lowerCAmelCase : Union[str, Any] = model_class(__A )
            _lowerCAmelCase : Optional[Any] = model.generate(__A ).sequences

            self.assertEqual(generation_outputs.shape[-1] , __A )

            _lowerCAmelCase : Optional[Any] = jit(model.generate )
            _lowerCAmelCase : Tuple = jit_generate(__A ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def __UpperCamelCase ( self ):
        # num_return_sequences expands the batch dimension
        _lowerCAmelCase : Dict = self._get_input_ids_and_config()
        _lowerCAmelCase : Any = False
        _lowerCAmelCase : Any = max_length
        _lowerCAmelCase : Union[str, Any] = 2
        _lowerCAmelCase : int = 2

        for model_class in self.all_generative_model_classes:
            _lowerCAmelCase : Union[str, Any] = model_class(__A )
            _lowerCAmelCase : Any = model.generate(__A ).sequences
            self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )

    def __UpperCamelCase ( self ):
        # sampling with temperature/top_k/top_p and forced token params
        _lowerCAmelCase : Optional[Any] = self._get_input_ids_and_config()
        _lowerCAmelCase : str = True
        _lowerCAmelCase : Optional[int] = max_length
        _lowerCAmelCase : Optional[int] = 0.8
        _lowerCAmelCase : Dict = 1_0
        _lowerCAmelCase : List[Any] = 0.3
        _lowerCAmelCase : Union[str, Any] = 1
        _lowerCAmelCase : Union[str, Any] = 8
        _lowerCAmelCase : Union[str, Any] = 9

        for model_class in self.all_generative_model_classes:
            _lowerCAmelCase : Union[str, Any] = model_class(__A )
            _lowerCAmelCase : Any = model.generate(__A ).sequences

            self.assertEqual(generation_outputs.shape[-1] , __A )

            _lowerCAmelCase : List[str] = jit(model.generate )
            _lowerCAmelCase : int = jit_generate(__A ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def __UpperCamelCase ( self ):
        # greedy with min_length / forced token params
        _lowerCAmelCase : Optional[Any] = self._get_input_ids_and_config()
        _lowerCAmelCase : Optional[int] = max_length
        _lowerCAmelCase : Tuple = 1
        _lowerCAmelCase : Optional[Any] = 8
        _lowerCAmelCase : Optional[Any] = 9

        for model_class in self.all_generative_model_classes:
            _lowerCAmelCase : Dict = model_class(__A )
            _lowerCAmelCase : List[Any] = model.generate(__A ).sequences

            self.assertEqual(generation_outputs.shape[-1] , __A )

            _lowerCAmelCase : Optional[int] = jit(model.generate )
            _lowerCAmelCase : Optional[Any] = jit_generate(__A ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def __UpperCamelCase ( self ):
        # beam search with min_length / forced token params
        _lowerCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
        _lowerCAmelCase : List[str] = max_length
        _lowerCAmelCase : Tuple = 2
        _lowerCAmelCase : Dict = 1
        _lowerCAmelCase : List[str] = 8
        _lowerCAmelCase : Any = 9

        for model_class in self.all_generative_model_classes:
            _lowerCAmelCase : str = model_class(__A )
            _lowerCAmelCase : Optional[Any] = model.generate(__A ).sequences

            self.assertEqual(generation_outputs.shape[-1] , __A )

            _lowerCAmelCase : Union[str, Any] = jit(model.generate )
            _lowerCAmelCase : str = jit_generate(__A ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def __UpperCamelCase ( self ):
        # greedy generation with a left-padded attention mask
        _lowerCAmelCase : Optional[Any] = self._get_input_ids_and_config()

        # pad attention mask on the left
        _lowerCAmelCase : Tuple = attention_mask.at[(0, 0)].set(0 )

        _lowerCAmelCase : Any = False
        _lowerCAmelCase : Dict = max_length

        for model_class in self.all_generative_model_classes:
            _lowerCAmelCase : List[Any] = model_class(__A )
            _lowerCAmelCase : Any = model.generate(__A , attention_mask=__A ).sequences

            self.assertEqual(generation_outputs.shape[-1] , __A )

            _lowerCAmelCase : int = jit(model.generate )
            _lowerCAmelCase : List[str] = jit_generate(__A , attention_mask=__A ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def __UpperCamelCase ( self ):
        # sampling with a left-padded attention mask
        _lowerCAmelCase : List[str] = self._get_input_ids_and_config()

        # pad attention mask on the left
        _lowerCAmelCase : int = attention_mask.at[(0, 0)].set(0 )

        _lowerCAmelCase : List[str] = True
        _lowerCAmelCase : Optional[Any] = max_length

        for model_class in self.all_generative_model_classes:
            _lowerCAmelCase : Dict = model_class(__A )
            _lowerCAmelCase : Optional[Any] = model.generate(__A , attention_mask=__A ).sequences

            self.assertEqual(generation_outputs.shape[-1] , __A )

            _lowerCAmelCase : Dict = jit(model.generate )
            _lowerCAmelCase : int = jit_generate(__A , attention_mask=__A ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def __UpperCamelCase ( self ):
        # beam search with a left-padded attention mask
        _lowerCAmelCase : int = self._get_input_ids_and_config()

        # pad attention mask on the left
        _lowerCAmelCase : Any = attention_mask.at[(0, 0)].set(0 )

        _lowerCAmelCase : List[str] = 2
        _lowerCAmelCase : Optional[int] = max_length

        for model_class in self.all_generative_model_classes:
            _lowerCAmelCase : Optional[Any] = model_class(__A )
            _lowerCAmelCase : Dict = model.generate(__A , attention_mask=__A ).sequences

            self.assertEqual(generation_outputs.shape[-1] , __A )

            _lowerCAmelCase : Dict = jit(model.generate )
            _lowerCAmelCase : str = jit_generate(__A , attention_mask=__A ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )


@require_flax
class a_ (unittest.TestCase ):
    def __UpperCamelCase ( self ):
        _lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
        _lowerCAmelCase : Tuple = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
        _lowerCAmelCase : Any = """Hello world"""
        _lowerCAmelCase : Union[str, Any] = tokenizer(__A , return_tensors="""np""" ).input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(__A , """do_samples""" ):
            model.generate(__A , do_samples=__A )

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(__A , """foo""" ):
            _lowerCAmelCase : int = {"""foo""": """bar"""}
            model.generate(__A , **__A )
309
"""LeViT model configuration."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    """Configuration class for a LeViT model.

    Defaults correspond to the levit-128S architecture. All arguments are
    stored verbatim on the instance.
    """

    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        # BUGFIX: all parameters were obfuscated to the duplicate name "__A"
        # (a SyntaxError) while the body read the canonical names; restored.
        # NOTE(review): list defaults kept for interface fidelity with the
        # upstream configuration class.
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Down-sampling ("Subsample") operations between the three stages.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    # NOTE(review): previously reused the config class's name, clobbering it
    # at import time; renamed to a distinct class.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic axes for the ONNX export pixel input."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
84
0
'''simple docstring'''
# NOTE(review): machine-obfuscated test module: assignment targets were renamed
# to `UpperCAmelCase`, method names collapsed to `UpperCAmelCase_` (later defs
# shadow earlier ones), and call sites pass the undefined placeholder `__A`.
# Preserved verbatim; restoring behavior requires the upstream original.

import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class A_ ( A__ ):
    '''simple docstring'''

    UpperCAmelCase_ : Union[str, Any] = (UnCLIPScheduler,)

    def UpperCAmelCase_ ( self : Optional[int] , **lowercase_ : Optional[int] ) -> int:
        # presumably get_scheduler_config(**kwargs); `config` below is undefined.
        UpperCAmelCase : Dict = {
            """num_train_timesteps""": 1_000,
            """variance_type""": """fixed_small_log""",
            """clip_sample""": True,
            """clip_sample_range""": 1.0,
            """prediction_type""": """epsilon""",
        }

        config.update(**__A )
        return config

    def UpperCAmelCase_ ( self : List[str] ) -> Dict:
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=__A )

    def UpperCAmelCase_ ( self : Tuple ) -> Any:
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=__A )

    def UpperCAmelCase_ ( self : int ) -> str:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=__A )

    def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=__A )

    def UpperCAmelCase_ ( self : Optional[int] ) -> str:
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=__A )

    def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=__A , prev_timestep=__A )

    def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
        # variance values for the fixed_small_log schedule
        UpperCAmelCase : int = self.scheduler_classes[0]
        UpperCAmelCase : Optional[Any] = self.get_scheduler_config(variance_type='fixed_small_log' )
        UpperCAmelCase : Any = scheduler_class(**__A )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
        # variance values for the learned_range schedule
        UpperCAmelCase : int = self.scheduler_classes[0]
        UpperCAmelCase : Union[str, Any] = self.get_scheduler_config(variance_type='learned_range' )
        UpperCAmelCase : Optional[Any] = scheduler_class(**__A )

        UpperCAmelCase : Tuple = 0.5

        assert scheduler._get_variance(1 , predicted_variance=__A ) - -10.1_712_790 < 1E-5
        assert scheduler._get_variance(487 , predicted_variance=__A ) - -5.799_8052 < 1E-5
        assert scheduler._get_variance(999 , predicted_variance=__A ) - -0.001_0011 < 1E-5

    def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[Any]:
        # full denoising loop over the default timesteps
        UpperCAmelCase : List[Any] = self.scheduler_classes[0]
        UpperCAmelCase : Any = self.get_scheduler_config()
        UpperCAmelCase : str = scheduler_class(**__A )

        UpperCAmelCase : List[Any] = scheduler.timesteps

        UpperCAmelCase : Dict = self.dummy_model()
        UpperCAmelCase : str = self.dummy_sample_deter
        UpperCAmelCase : Any = torch.manual_seed(0 )

        for i, t in enumerate(__A ):
            # 1. predict noise residual
            UpperCAmelCase : Dict = model(__A , __A )

            # 2. predict previous mean of sample x_t-1
            UpperCAmelCase : List[str] = scheduler.step(__A , __A , __A , generator=__A ).prev_sample

            UpperCAmelCase : str = pred_prev_sample

        UpperCAmelCase : List[str] = torch.sum(torch.abs(__A ) )
        UpperCAmelCase : Union[str, Any] = torch.mean(torch.abs(__A ) )

        assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2
        assert abs(result_mean.item() - 0.328_4743 ) < 1E-3

    def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
        # denoising loop with an explicit 25-step schedule and prev_timestep
        UpperCAmelCase : Any = self.scheduler_classes[0]
        UpperCAmelCase : Dict = self.get_scheduler_config()
        UpperCAmelCase : int = scheduler_class(**__A )
        scheduler.set_timesteps(25 )

        UpperCAmelCase : Tuple = scheduler.timesteps

        UpperCAmelCase : List[str] = self.dummy_model()
        UpperCAmelCase : List[str] = self.dummy_sample_deter
        UpperCAmelCase : List[str] = torch.manual_seed(0 )

        for i, t in enumerate(__A ):
            # 1. predict noise residual
            UpperCAmelCase : Optional[Any] = model(__A , __A )

            if i + 1 == timesteps.shape[0]:
                UpperCAmelCase : List[str] = None
            else:
                UpperCAmelCase : Any = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            UpperCAmelCase : Optional[Any] = scheduler.step(
                __A , __A , __A , prev_timestep=__A , generator=__A ).prev_sample

            UpperCAmelCase : List[Any] = pred_prev_sample

        UpperCAmelCase : int = torch.sum(torch.abs(__A ) )
        UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(__A ) )

        assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2
        assert abs(result_mean.item() - 0.336_2038 ) < 1E-3

    def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
        pass

    def UpperCAmelCase_ ( self : str ) -> str:
        pass
151
"""Tests for datasets' FileLock helper."""
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    """A second lock on the same file must time out while the first is held.

    NOTE(review): the original functions were renamed to `_snake_case`, so
    pytest never collected them; names restored to the `test_*` convention.
    """
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            # BUGFIX: previously passed the tmpdir fixture as the timeout and
            # expected pytest.raises(tmpdir); the timeout value and the Timeout
            # exception type are what the assertions below rely on.
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    """Over-long lock filenames are hashed down to a <=255-char basename."""
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
84
0
def matching_min_vertex_cover(graph: dict) -> set:
    """Return a vertex cover of *graph* via the maximal-matching 2-approximation.

    Repeatedly take an arbitrary remaining edge (from_node, to_node), add both
    endpoints to the cover, then drop every edge touching either endpoint.
    The result is at most twice the size of a minimum vertex cover.
    """
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        # Remove every edge adjacent to either chosen endpoint.
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of directed (from_node, to_node) pairs in *graph*."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
137
"""simple docstring""" from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __UpperCAmelCase = 1.054571817e-34 # unit of ℏ : J * s __UpperCAmelCase = 3e8 # unit of c : m * s^-1 def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> dict[str, float]: '''simple docstring''' if (force, area, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if force < 0: raise ValueError("""Magnitude of force can not be negative""" ) if distance < 0: raise ValueError("""Distance can not be negative""" ) if area < 0: raise ValueError("""Area can not be negative""" ) if force == 0: lowerCAmelCase_ :Union[str, Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_4_0 * (distance) ** 4 ) return {"force": force} elif area == 0: lowerCAmelCase_ :Optional[Any] = (2_4_0 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: lowerCAmelCase_ :Any = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("""One and only one argument must be 0""" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
84
0
# Lazy import structure for the Whisper model family: heavy, backend-specific
# submodules are registered here and only imported on first attribute access.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Always-importable (framework-independent) submodules.
_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]


if TYPE_CHECKING:
    # Static imports mirror the lazy structure above so type checkers see them.
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so backends load only on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
338
"""simple docstring""" def _snake_case ( lowercase__ : str , lowercase__ : str ) -> int: '''simple docstring''' if len(lowercase__ ) != len(lowercase__ ): raise ValueError("""String lengths must match!""" ) lowerCAmelCase_ :Optional[int] = 0 for chara, chara in zip(lowercase__ , lowercase__ ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
84
0
from queue import PriorityQueue from typing import Any import numpy as np def __magic_name__ ( __a : dict , __a : str , __a : set , __a : set , __a : dict , __a : dict , __a : PriorityQueue , __a : dict , __a : float | int , ): '''simple docstring''' for nxt, d in graph[v]: if nxt in visited_forward: continue UpperCamelCase__ = cst_fwd.get(lowercase__ , np.inf ) UpperCamelCase__ = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) UpperCamelCase__ = new_cost_f UpperCamelCase__ = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: UpperCamelCase__ = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def __magic_name__ ( __a : str , __a : str , __a : dict , __a : dict ): '''simple docstring''' UpperCamelCase__ = -1 UpperCamelCase__ = set() UpperCamelCase__ = set() UpperCamelCase__ = {source: 0} UpperCamelCase__ = {destination: 0} UpperCamelCase__ = {source: None} UpperCamelCase__ = {destination: None} UpperCamelCase__ = PriorityQueue() UpperCamelCase__ = PriorityQueue() UpperCamelCase__ = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): UpperCamelCase__ = queue_forward.get() visited_forward.add(lowercase__ ) UpperCamelCase__ = queue_backward.get() visited_backward.add(lowercase__ ) UpperCamelCase__ = pass_and_relaxation( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) UpperCamelCase__ = pass_and_relaxation( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: UpperCamelCase__ = shortest_distance return shortest_path_distance lowerCamelCase_ = { '''B''': [['''C''', 1]], '''C''': [['''D''', 1]], '''D''': [['''F''', 1]], '''E''': [['''B''', 
1], ['''G''', 2]], '''F''': [], '''G''': [['''F''', 1]], } lowerCamelCase_ = { '''B''': [['''E''', 1]], '''C''': [['''B''', 1]], '''D''': [['''C''', 1]], '''F''': [['''D''', 1], ['''G''', 1]], '''E''': [[None, np.inf]], '''G''': [['''E''', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
244
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A ) -> Optional[Any]: super().__init__() lowerCAmelCase_ :int = nn.ModuleList(__A ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A = None , __A = None , __A = None , __A = None , __A = False , __A = True , ) -> Union[ControlNetOutput, Tuple]: for i, (image, scale, controlnet) in enumerate(zip(__A , __A , self.nets ) ): lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = controlnet( __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) # merge samples if i == 0: lowerCAmelCase_ , lowerCAmelCase_ :Tuple = down_samples, mid_sample else: lowerCAmelCase_ :str = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__A , __A ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __lowerCAmelCase ( self , __A , __A = True , __A = None , __A = False , __A = None , ) -> Optional[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = save_directory for controlnet in self.nets: controlnet.save_pretrained( __A , is_main_process=__A , save_function=__A , safe_serialization=__A , variant=__A , ) idx += 1 lowerCAmelCase_ :Any = model_path_to_save + f"""_{idx}""" @classmethod def __lowerCAmelCase ( cls , __A , **__A ) -> List[Any]: lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Dict = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... 
lowerCAmelCase_ :List[Any] = pretrained_model_path while os.path.isdir(__A ): lowerCAmelCase_ :Tuple = ControlNetModel.from_pretrained(__A , **__A ) controlnets.append(__A ) idx += 1 lowerCAmelCase_ :Dict = pretrained_model_path + f"""_{idx}""" logger.info(f"""{len(__A )} controlnets loaded from {pretrained_model_path}.""" ) if len(__A ) == 0: raise ValueError( f"""No ControlNets found under {os.path.dirname(__A )}. Expected at least {pretrained_model_path + "_0"}.""" ) return cls(__A )
84
0
'''simple docstring'''
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


# NOTE(review): local names in this file were mangled — several lines read
# names (`dataset`, `audio_classifier`, `audio`, `__A`) that are never
# assigned, and every test method shares the name `a_`, so later definitions
# shadow earlier ones on the class.  Comments describe the apparent intent.
@is_pipeline_test
@require_torch
class a__( unittest.TestCase ):
    '''simple docstring'''

    @require_torch
    def a_ ( self):
        """Smoke-test zero-shot audio classification with a tiny CLAP checkpoint."""
        lowerCAmelCase = pipeline(
            task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""")
        # One ESC-50 clip serves as the classification input.
        lowerCAmelCase = load_dataset("""ashraq/esc50""")
        lowerCAmelCase = dataset["""train"""]["""audio"""][-1]["""array"""]
        lowerCAmelCase = audio_classifier(__A , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""])
        # Tiny model: scores are near-uniform, only rounded values are pinned.
        self.assertEqual(
            nested_simplify(__A) ,
            [{"""score""": 0.501, """label""": """Sound of a dog"""}, {"""score""": 0.499, """label""": """Sound of vaccum cleaner"""}] , )

    @unittest.skip("""There is no TF variant of this pipeline""") if False else unittest.skip("""No models are available in TF""")
    def a_ ( self):
        """Placeholder for the TF variant (skipped: no TF CLAP models)."""
        pass

    @slow
    @require_torch
    def a_ ( self):
        """Full-size CLAP checkpoint: single clip, batched list, and batch_size=5."""
        lowerCAmelCase = pipeline(
            task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
        # This is an audio of a dog
        lowerCAmelCase = load_dataset("""ashraq/esc50""")
        lowerCAmelCase = dataset["""train"""]["""audio"""][-1]["""array"""]
        lowerCAmelCase = audio_classifier(__A , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""])
        self.assertEqual(
            nested_simplify(__A) ,
            [
                {"""score""": 0.999, """label""": """Sound of a dog"""},
                {"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
            ] , )
        # Same clip repeated five times: expect five identical result lists.
        lowerCAmelCase = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""])
        self.assertEqual(
            nested_simplify(__A) ,
            [
                [
                    {"""score""": 0.999, """label""": """Sound of a dog"""},
                    {"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
                ],
            ] * 5 , )
        # Explicit batching must not change the scores.
        lowerCAmelCase = audio_classifier(
            [audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5)
        self.assertEqual(
            nested_simplify(__A) ,
            [
                [
                    {"""score""": 0.999, """label""": """Sound of a dog"""},
                    {"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
                ],
            ] * 5 , )

    @unittest.skip("""No models are available in TF""")
    def a_ ( self):
        """Placeholder for the TF variant (skipped: no TF CLAP models)."""
        pass
272
"""simple docstring""" from PIL import Image def _snake_case ( lowercase__ : Image , lowercase__ : float ) -> Image: '''simple docstring''' def brightness(lowercase__ : int ) -> float: return 1_2_8 + level + (c - 1_2_8) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(lowercase__ ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 __UpperCAmelCase = change_brightness(img, 1_00) brigt_img.save('image_data/lena_brightness.png', format='png')
84
0
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return all knight moves from *position* that stay on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Return True when every square has been visited (no zero entries)."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking step: try to extend the tour from *pos* at move number *curr*."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # undo the move (backtrack)

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Return a board holding an open knight's tour of size *n*.

    Tries every starting square in turn; raises ValueError when no tour exists.
    """
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Kight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
127
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class _SCREAMING_SNAKE_CASE : def __lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) lowerCAmelCase_ :int = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :List[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :int = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } 
def __lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ :Dict = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[int] = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Dict = 
self.get_dummy_components() lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Optional[int] = inputs["""prompt"""] lowerCAmelCase_ :Optional[int] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Optional[int] = inputs["""output_type"""] if "image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""image"""] else: lowerCAmelCase_ :int = None if "mask_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""mask_image"""] else: lowerCAmelCase_ :int = None if "original_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""original_image"""] else: lowerCAmelCase_ :List[Any] = None lowerCAmelCase_ , lowerCAmelCase_ :int = pipe.encode_prompt(__A ) # inputs with prompt converted to embeddings lowerCAmelCase_ :List[str] = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :int = image if mask_image is not None: lowerCAmelCase_ :Tuple = mask_image if original_image is not None: lowerCAmelCase_ :Optional[Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(__A , __A , __A ) lowerCAmelCase_ :Optional[int] = pipe(**__A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Optional[int] = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(__A , __A ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) lowerCAmelCase_ :Dict = self.get_dummy_inputs(__A ) 
lowerCAmelCase_ :Union[str, Any] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Tuple = inputs["""output_type"""] # inputs with prompt converted to embeddings lowerCAmelCase_ :Tuple = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :Optional[int] = image if mask_image is not None: lowerCAmelCase_ :str = mask_image if original_image is not None: lowerCAmelCase_ :Tuple = original_image lowerCAmelCase_ :Union[str, Any] = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Any = self.get_dummy_components() lowerCAmelCase_ :Optional[int] = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[int] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Dict = pipe(**__A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Any = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests lowerCAmelCase_ :List[Any] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :str = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 )
84
0
"""simple docstring""" import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu A : Tuple = [ "EAGER", "AOT_EAGER", "INDUCTOR", "NVFUSER", "AOT_NVFUSER", "AOT_CUDAGRAPHS", "OFI", "FX2TRT", "ONNXRT", "IPEX", ] def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None ): '''simple docstring''' __lowerCAmelCase = True while ask_again: __lowerCAmelCase = input(lowercase__ ) try: if default is not None and len(lowercase__ ) == 0: return default return convert_value(lowercase__ ) if convert_value is not None else result except Exception: if error_message is not None: print(lowercase__ ) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=[] , _UpperCamelCase=None , _UpperCamelCase=0 ): '''simple docstring''' __lowerCAmelCase = BulletMenu(lowercase__ , lowercase__ ) __lowerCAmelCase = menu.run(default_choice=lowercase__ ) return convert_value(lowercase__ ) if convert_value is not None else result def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = int(lowercase__ ) return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = int(lowercase__ ) return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = int(lowercase__ ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = int(lowercase__ ) return PrecisionType(["no", "fp16", "bf16", "fp8"][value] ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = int(lowercase__ ) return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] ) def _lowerCamelCase ( _UpperCamelCase ): '''simple 
docstring''' return {"yes": True, "no": False}[value.lower()] class _UpperCamelCase ( argparse.RawDescriptionHelpFormatter ): '''simple docstring''' def snake_case ( self , __a , __a , __a , __a ): __lowerCAmelCase = super()._format_usage(__A , __A , __A , __A ) __lowerCAmelCase = usage.replace("<command> [<args>] " , "" ) return usage
57
"""simple docstring""" import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = FlaxStableDiffusionPipeline.from_pretrained( """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , ) lowerCAmelCase_ :int = """A painting of a squirrel eating a burger""" lowerCAmelCase_ :List[Any] = jax.device_count() lowerCAmelCase_ :Optional[Any] = num_samples * [prompt] lowerCAmelCase_ :int = sd_pipe.prepare_inputs(__A ) lowerCAmelCase_ :Optional[Any] = replicate(__A ) lowerCAmelCase_ :Union[str, Any] = shard(__A ) lowerCAmelCase_ :Optional[Any] = jax.random.PRNGKey(0 ) lowerCAmelCase_ :Tuple = jax.random.split(__A , jax.device_count() ) lowerCAmelCase_ :Union[str, Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowerCAmelCase_ :Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1] lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCAmelCase_ :Optional[int] = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Union[str, Any] = """stabilityai/stable-diffusion-2""" 
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(__A , subfolder="""scheduler""" ) lowerCAmelCase_ , lowerCAmelCase_ :List[str] = FlaxStableDiffusionPipeline.from_pretrained( __A , scheduler=__A , revision="""bf16""" , dtype=jnp.bfloataa , ) lowerCAmelCase_ :Optional[int] = scheduler_params lowerCAmelCase_ :List[Any] = """A painting of a squirrel eating a burger""" lowerCAmelCase_ :Tuple = jax.device_count() lowerCAmelCase_ :str = num_samples * [prompt] lowerCAmelCase_ :Union[str, Any] = sd_pipe.prepare_inputs(__A ) lowerCAmelCase_ :Tuple = replicate(__A ) lowerCAmelCase_ :Optional[int] = shard(__A ) lowerCAmelCase_ :List[str] = jax.random.PRNGKey(0 ) lowerCAmelCase_ :List[Any] = jax.random.split(__A , jax.device_count() ) lowerCAmelCase_ :Optional[Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowerCAmelCase_ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1] lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCAmelCase_ :Dict = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
84
0
# Accelerate runtime constants: checkpoint file names, SageMaker versions,
# FSDP/DeepSpeed option lists and torch.distributed launch parameters.
# NOTE(review): every binding below uses the same mangled name `lowerCamelCase`,
# so each assignment overwrites the previous one and the original constant
# names (e.g. SCALER_NAME, MODEL_NAME, ...) are lost — verify against upstream.
import operator as op

lowerCamelCase = 'scaler.pt'  # gradient-scaler state file
lowerCamelCase = 'pytorch_model'  # model checkpoint stem
lowerCamelCase = 'random_states'  # RNG-state checkpoint stem
lowerCamelCase = 'optimizer'  # optimizer checkpoint stem
lowerCamelCase = 'scheduler'  # LR-scheduler checkpoint stem
lowerCamelCase = 'pytorch_model.bin'
lowerCamelCase = 'pytorch_model.bin.index.json'
lowerCamelCase = 'model.safetensors'
lowerCamelCase = 'model.safetensors.index.json'
lowerCamelCase = '1.10.2'  # presumably the SageMaker PyTorch version — verify
lowerCamelCase = 'py38'  # presumably the SageMaker Python version — verify
lowerCamelCase = '4.17.0'  # presumably the SageMaker Transformers version — verify
lowerCamelCase = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']  # multi-GPU EC2 instance types
lowerCamelCase = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']  # FSDP sharding strategies
lowerCamelCase = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']  # FSDP auto-wrap policies
lowerCamelCase = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']  # FSDP backward-prefetch modes
lowerCamelCase = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']  # FSDP state-dict types
lowerCamelCase = '2.0.1'
lowerCamelCase = ['pdsh', 'standard', 'openmpi', 'mvapich']  # DeepSpeed multinode launchers
lowerCamelCase = ['default', 'reduce-overhead', 'max-autotune']  # torch.compile modes
# String comparison operator -> callable, for version checks.
lowerCamelCase = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowerCamelCase = [
    'nnodes',
    'nproc_per_node',
    'rdzv_backend',
    'rdzv_endpoint',
    'rdzv_id',
    'rdzv_conf',
    'standalone',
    'max_restarts',
    'monitor_interval',
    'start_method',
    'role',
    'module',
    'm',
    'no_python',
    'run_path',
    'log_dir',
    'r',
    'redirects',
    't',
    'tee',
    'node_rank',
    'master_addr',
    'master_port',
]
lowerCamelCase = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']  # CUDA-backed distributed types
lowerCamelCase = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']  # presumably XPU-capable distributed types — verify
199
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def _snake_case ( ) -> Generator[int, None, None]: '''simple docstring''' lowerCAmelCase_ :dict[int, int] = {} lowerCAmelCase_ :int = 2 while True: lowerCAmelCase_ :List[Any] = factor_map.pop(lowercase__ , lowercase__ ) if factor: lowerCAmelCase_ :Optional[int] = factor + prime while x in factor_map: x += factor lowerCAmelCase_ :List[str] = factor else: lowerCAmelCase_ :Optional[int] = prime yield prime prime += 1 def _snake_case ( lowercase__ : float = 1E10 ) -> int: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = sieve() lowerCAmelCase_ :str = 1 while True: lowerCAmelCase_ :int = next(lowercase__ ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(lowercase__ ) n += 2 if __name__ == "__main__": print(solution())
84
0
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    """Base class for readers that build a :class:`Dataset` from file path(s)."""

    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        # NOTE(review): default to "train" unless a split is given or the input is a
        # mapping of split -> paths; the isinstance target was mangled in the
        # original — `dict` matches the datasets convention, confirm upstream.
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        """Materialize and return the dataset (or dataset dict / iterable variant)."""
        pass


class AbstractDatasetInputStream(ABC):
    """Base class for readers that build a :class:`Dataset` from an in-memory stream."""

    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        """Materialize and return the dataset (or its iterable variant)."""
        pass
68
"""Tests for the ONNX Stable Diffusion x4 upscale pipeline."""
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionUpscalePipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        """Build deterministic pipeline kwargs (128x128 input image)."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # NOTE(review): the mangled source only shows `... = False`; the
        # attribute name `enable_mem_pattern` follows the sibling ONNX tests —
        # confirm upstream.
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
84
0
def solution(n: int = 1000) -> int:
    """Return sum(2 * a * ((a - 1) // 2)) for a in 3..n inclusive.

    >>> solution(3)
    6
    >>> solution(4)
    14
    """
    # range(3, n + 1) is empty for n < 3, so the sum is 0 there.
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
345
"""SageMaker multi-node model-parallel training smoke test (release-time only)."""
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            # the example script must live inside the job's source dir
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator configured for smdistributed Model Parallel."""
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
84
0
"""Lazy import structure for the RoCBert model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): no fast-tokenizer module is registered in this snapshot;
    # nothing extra to add when `tokenizers` is available.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # was an unconditional `raise OptionalDependencyNotAvailable()` on the
        # success path, which broke type checking whenever tokenizers WAS
        # available; there is no fast tokenizer to import here.
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
309
"""Project Euler 97: last digits of the non-Mersenne prime 28433 * 2**7830457 + 1."""


def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of 28433 * 2**7830457 + 1.

    Uses three-argument ``pow`` for modular exponentiation so the huge power
    is never materialized.

    :raises ValueError: if ``n`` is not a non-negative integer.
    >>> solution(10)
    '8739992577'
    """
    # The original compared `isinstance(n, n)`, which raised TypeError on every call.
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
84
0
'''simple docstring''' from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar lowercase__ = TypeVar("T") class A_ ( Generic[T] ): '''simple docstring''' UpperCAmelCase_ : deque[T] # Cache store of keys UpperCAmelCase_ : set[T] # References of the keys in cache UpperCAmelCase_ : int = 10 # Maximum capacity of cache def __init__( self : Tuple , lowercase_ : Tuple ) -> None: UpperCAmelCase : Tuple = deque() UpperCAmelCase : Any = set() if not n: UpperCAmelCase : str = sys.maxsize elif n < 0: raise ValueError('n should be an integer greater than 0.' ) else: UpperCAmelCase : Any = n def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Any ) -> None: if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: UpperCAmelCase : int = self.dq_store.pop() self.key_reference.remove(__A ) else: self.dq_store.remove(__A ) self.dq_store.appendleft(__A ) self.key_reference.add(__A ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> None: for k in self.dq_store: print(__A ) def __repr__( self : List[str] ) -> str: return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}""" if __name__ == "__main__": import doctest doctest.testmod() lowercase__ = LRUCache(4) lru_cache.refer("A") lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer("A") lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
151
"""Keep the auto-generated model lists in the task guides up to date."""
import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return (text, start_index, end_index, lines) for the region of ``filename``
    between the line starting with ``start_prompt`` and the line starting with
    ``end_prompt``, with surrounding blank lines trimmed."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim blank lines ("\n" has length 1) at both edges of the region.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Maps each task guide to the auto-mapping holding the models that support the task.
TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    """Return the formatted markdown list of model links for one task guide."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Verify (or, with ``overwrite``, rewrite) the generated model list in one guide.

    :raises ValueError: when the list is stale and ``overwrite`` is False.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
84
0
from typing import Any

from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


# NOTE(review): the mangled original inherited from an undefined `A__` while
# importing `BertTokenizerFast` without using it — restoring it as the base.
class CustomTokenizerFast(BertTokenizerFast):
    # ties this fast tokenizer to its slow counterpart
    slow_tokenizer_class: Any = CustomTokenizer
    pass
137
"""Generate all permutations of a list, recursively and by backtracking."""


def permute(nums: list[int]) -> list[list[int]]:
    """Return every permutation of ``nums`` (recursive rotation approach).

    The input list is mutated during recursion but restored before returning.
    >>> sorted(permute([1, 2]))
    [[1, 2], [2, 1]]
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        # append the removed element to every sub-permutation
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    """Return every permutation of ``nums`` using in-place swap backtracking.

    >>> sorted(permute2([1, 2]))
    [[1, 2], [2, 1]]
    """
    output = []

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
84
0
import torch

from diffusers import StableDiffusionPipeline


# Load a fine-tuned (e.g. DreamBooth) checkpoint in half precision on GPU.
# The original used `torch.floataa`, which does not exist; torch.float16 is
# the half-precision dtype this mangled name stood for.
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
338
"""Tokenizer tests for BioGPT."""
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and id conversion against the toy BPE vocab above."""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        # BioGPT prepends </s> (id 2) and uses no separator between pairs' prefix
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
84
0
"""ASCII85 (base85) encode/decode helpers over the stdlib `base64` module."""
import base64


def base85_encode(string: str) -> bytes:
    """Encode a text string to its Ascii85 representation.

    >>> base85_encode("")
    b''
    """
    # encoded the UTF-8 bytes of `string` with Ascii85
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 bytes back to a text string.

    >>> base85_decode(b"")
    ''
    """
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
244
"""Configuration for BertGeneration models."""
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    """Stores the configuration of a BertGeneration encoder/decoder."""

    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # The mangled original bound all of these to locals, so instances never
        # carried any configuration — store them on `self` as intended.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
84
0
"""Tokenization for CPM: jieba pre-segmentation followed by SentencePiece."""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs jieba word segmentation and SentencePiece tokenization for CPM."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word that includes preceding space.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        # map " " -> U+2582 and "\n" -> U+2583 so they survive SentencePiece
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # the SentencePiece processor is not picklable — drop it, reload in __setstate__
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize whitespace, quotes, accents and case per the config flags."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize with SentencePiece, re-splitting digit+comma pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (pieces) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """XLNet-style: A <sep> <cls> or A <sep> B <sep> <cls>."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for sequence A, 1 for B, 2 for the trailing <cls>."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        # undo the whitespace translation performed before tokenization
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
272
"""Ford-Fulkerson (Edmonds-Karp) maximum flow on an adjacency-matrix graph."""


def bfs(graph, source, sink, parent):
    """BFS over residual capacities; fill ``parent`` and report sink reachability."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Return the maximum flow from ``source`` to ``sink``.

    ``graph`` is a capacity matrix and is mutated into its residual form.
    """
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


if __name__ == "__main__":
    graph = [
        [0, 16, 13, 0, 0, 0],
        [0, 0, 10, 12, 0, 0],
        [0, 4, 0, 0, 14, 0],
        [0, 0, 9, 0, 0, 20],
        [0, 0, 0, 7, 0, 4],
        [0, 0, 0, 0, 0, 0],
    ]

    source, sink = 0, 5
    print(ford_fulkerson(graph, source, sink))
84
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    """Configuration for the Salesforce CTRL model.

    Instantiating with the defaults yields a configuration matching the
    original `ctrl` checkpoint. All arguments simply become attributes;
    unknown keyword arguments are forwarded to `PretrainedConfig`.
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic HF attribute names onto CTRL's native ones.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff  # dimension of the feed-forward layer
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
127
"""simple docstring""" import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = 1_0 lowerCAmelCase_ :Optional[int] = datasets.Features( { """tokens""": datasets.Sequence(datasets.Value("""string""" ) ), """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ), """answers""": datasets.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), """id""": datasets.Value("""int64""" ), } ) lowerCAmelCase_ :int = datasets.Dataset.from_dict( { """tokens""": [["""foo"""] * 5] * n, """labels""": [[1] * 5] * n, """answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0, """id""": list(range(lowercase__ ) ), } , features=lowercase__ , ) return dataset @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : int ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return filename # FILE_CONTENT + files __UpperCAmelCase = '\\n Text data.\n Second line of data.' 
@pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> str: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt""" lowerCAmelCase_ :List[Any] = FILE_CONTENT with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> Tuple: '''simple docstring''' import bza lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2""" lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] ) -> Dict: '''simple docstring''' import gzip lowerCAmelCase_ :int = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" ) lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" ) with gzip.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' if datasets.config.LZ4_AVAILABLE: import lza.frame lowerCAmelCase_ :List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4""" lowerCAmelCase_ :int = bytes(lowercase__ , """utf-8""" ) with lza.frame.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : Optional[int] ) -> Any: '''simple docstring''' if datasets.config.PY7ZR_AVAILABLE: import pyazr lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z""" with pyazr.SevenZipFile(lowercase__ , """w""" ) as archive: archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' 
import tarfile lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> str: '''simple docstring''' import lzma lowerCAmelCase_ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz""" lowerCAmelCase_ :Optional[Any] = bytes(lowercase__ , """utf-8""" ) with lzma.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import zipfile lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Tuple: '''simple docstring''' if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst""" lowerCAmelCase_ :Any = bytes(lowercase__ , """utf-8""" ) with zstd.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """file.xml""" lowerCAmelCase_ :Any = textwrap.dedent( """\ <?xml version=\"1.0\" encoding=\"UTF-8\" ?> <tmx version=\"1.4\"> <header segtype=\"sentence\" srclang=\"ca\" /> <body> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv> <tuv 
xml:lang=\"en\"><seg>Content 3</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv> </tu> </body> </tmx>""" ) with open(lowercase__ , """w""" ) as f: f.write(lowercase__ ) return filename __UpperCAmelCase = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __UpperCAmelCase = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __UpperCAmelCase = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __UpperCAmelCase = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __UpperCAmelCase = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Union[str, Any]: '''simple docstring''' return DATA_DICT_OF_LISTS @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> Any: '''simple docstring''' lowerCAmelCase_ :Tuple = datasets.Dataset.from_dict(lowercase__ ) lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" ) dataset.map(cache_file_name=lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" ) with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con: lowerCAmelCase_ :Union[str, Any] = con.cursor() cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" ) for item in DATA: cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, 
?)""" , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> int: '''simple docstring''' lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Optional[int] = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Any: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" ) with open(lowercase__ , """w""" , newline="""""" ) as f: lowerCAmelCase_ :Dict = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Dict ) -> Union[str, Any]: '''simple docstring''' import bza lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2""" with open(lowercase__ , """rb""" ) as f: lowerCAmelCase_ :Union[str, Any] = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(lowercase__ , """wb""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> Optional[Any]: '''simple 
docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) ) f.write(lowercase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : str ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" ) lowerCAmelCase_ :Optional[Any] = pa.schema( { """col_1""": pa.string(), """col_2""": pa.intaa(), """col_3""": pa.floataa(), } ) with open(lowercase__ , """wb""" ) as f: lowerCAmelCase_ :Optional[int] = pq.ParquetWriter(lowercase__ , schema=lowercase__ ) lowerCAmelCase_ :List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ ) writer.write_table(lowercase__ ) writer.close() return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Union[str, Any] = {"""data""": DATA} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : str ) -> List[Any]: '''simple docstring''' 
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" ) lowerCAmelCase_ :Optional[Any] = {"""data""": DATA_DICT_OF_LISTS} with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_312: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" ) with open(lowercase__ , """w""" ) as f: for item in DATA_STR: f.write(json.dumps(lowercase__ ) + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : int , lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path 
@pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> Any: '''simple docstring''' import gzip lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" ) with open(lowercase__ , """rb""" ) as orig_file: with gzip.open(lowercase__ , """wb""" ) as zipped_file: zipped_file.writelines(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : List[Any] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[str] ) -> int: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : List[str] ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Any = 
tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Dict , lowercase__ : str , lowercase__ : List[str] , lowercase__ : int ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar""" with tarfile.TarFile(lowercase__ , """w""" ) as f: f.add(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" ) with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Dict = ["""0""", """1""", """2""", """3"""] lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc""" with open(lowercase__ , """w""" ) as f: for item in data: f.write(item + """\n""" ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip""" with 
zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : List[str] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename("""unsupported.ext""" ) ) f.write(lowercase__ , arcname=os.path.basename("""unsupported_2.ext""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] ) lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" ) with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> int: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" ) @pytest.fixture(scope="""session""" ) def _snake_case ( ) -> Tuple: '''simple docstring''' return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" ) @pytest.fixture(scope="""session""" ) def 
_snake_case ( lowercase__ : Any , lowercase__ : Tuple ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip""" with zipfile.ZipFile(lowercase__ , """w""" ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace(""".jpg""" , """2.jpg""" ) ) return path @pytest.fixture(scope="""session""" ) def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data_dir""" ) (data_dir / "subdir").mkdir() with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden file with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f: f.write("""foo\n""" * 1_0 ) with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f: f.write("""bar\n""" * 1_0 ) return data_dir
84
0
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    """Agent tool that captions an image with a BLIP vision-to-text model.

    Input: `image` (a PIL image). Output: an English description string.
    """

    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # PIL (the "vision" backend) is required to preprocess images.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Turn the PIL image into model-ready pixel tensors."""
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        """Run autoregressive caption generation."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the generated token ids into a clean caption string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
57
"""Data2VecText model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    """Configuration for a Data2VecText (RoBERTa-style) model.

    Defaults reproduce `facebook/data2vec-text-base`. All arguments become
    attributes; special-token ids are forwarded to `PretrainedConfig`.
    """

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for ONNX export; multiple-choice adds a choice axis."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
84
0
import os def a_ ( SCREAMING_SNAKE_CASE__ : str = "matrix.txt" ): '''simple docstring''' with open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) as in_file: _lowerCamelCase : str =in_file.read() _lowerCamelCase : Tuple =[[int(lowercase__ ) for cell in row.split(',' )] for row in data.strip().splitlines()] _lowerCamelCase : Tuple =[[0 for cell in row] for row in grid] _lowerCamelCase : str =len(grid[0] ) _lowerCamelCase : Union[str, Any] =[[0 for i in range(lowercase__ )] for j in range(lowercase__ )] _lowerCamelCase : Optional[Any] =grid[0][0] for i in range(1 , lowercase__ ): _lowerCamelCase : Optional[int] =grid[0][i] + dp[0][i - 1] for i in range(1 , lowercase__ ): _lowerCamelCase : str =grid[i][0] + dp[i - 1][0] for i in range(1 , lowercase__ ): for j in range(1 , lowercase__ ): _lowerCamelCase : Dict =grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] ) return dp[-1][-1] if __name__ == "__main__": print(F"""{solution() = }""")
199
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( lowercase__ : Dict , lowercase__ : Dict , lowercase__ : str , lowercase__ : Tuple="attention" ) -> str: '''simple docstring''' lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""] lowerCAmelCase_ :Union[str, Any] = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""] lowerCAmelCase_ :Any = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""] lowerCAmelCase_ :Optional[int] = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""] return k, o, q, v def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : int , lowercase__ : Any=False ) -> int: '''simple docstring''' if split_mlp_wi: lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""] lowerCAmelCase_ :List[str] = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""] lowerCAmelCase_ :Tuple = (wi_a, wi_a) else: lowerCAmelCase_ :List[Any] = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""] lowerCAmelCase_ :Dict = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""] return wi, wo def _snake_case ( lowercase__ : Any , lowercase__ : Dict , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] ) -> Tuple: '''simple docstring''' return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""] def _snake_case ( lowercase__ : dict , *, lowercase__ : int , lowercase__ : bool ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = traverse_util.flatten_dict(variables["""target"""] ) lowerCAmelCase_ :Tuple = {"""/""".join(lowercase__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi lowerCAmelCase_ :Any = """encoder/layers_0/mlp/wi_0/kernel""" in old print("""Split MLP:""" , lowercase__ ) 
lowerCAmelCase_ :List[Any] = collections.OrderedDict() # Shared embeddings. lowerCAmelCase_ :Optional[int] = old["""token_embedder/embedding"""] # Encoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :str = tax_attention_lookup(lowercase__ , lowercase__ , """encoder""" , """attention""" ) lowerCAmelCase_ :Optional[Any] = layer_norm lowerCAmelCase_ :Any = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Tuple = q.T lowerCAmelCase_ :str = v.T # Block i, layer 1 (MLP). lowerCAmelCase_ :Dict = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Any = tax_mlp_lookup(lowercase__ , lowercase__ , """encoder""" , lowercase__ ) lowerCAmelCase_ :Union[str, Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :List[Any] = wi[0].T lowerCAmelCase_ :Dict = wi[1].T else: lowerCAmelCase_ :int = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Tuple = old[ """encoder/relpos_bias/rel_embedding""" ].T lowerCAmelCase_ :List[str] = old["""encoder/encoder_norm/scale"""] if not is_encoder_only: # Decoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). lowerCAmelCase_ :Optional[Any] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_self_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """self_attention""" ) lowerCAmelCase_ :List[Any] = layer_norm lowerCAmelCase_ :List[str] = k.T lowerCAmelCase_ :Any = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :Dict = v.T # Block i, layer 1 (Cross Attention). 
lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_cross_attention_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Tuple = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """encoder_decoder_attention""" ) lowerCAmelCase_ :Optional[int] = layer_norm lowerCAmelCase_ :str = k.T lowerCAmelCase_ :Tuple = o.T lowerCAmelCase_ :Any = q.T lowerCAmelCase_ :int = v.T # Block i, layer 2 (MLP). lowerCAmelCase_ :Any = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase_ , lowerCAmelCase_ :Dict = tax_mlp_lookup(lowercase__ , lowercase__ , """decoder""" , lowercase__ ) lowerCAmelCase_ :List[Any] = layer_norm if split_mlp_wi: lowerCAmelCase_ :Any = wi[0].T lowerCAmelCase_ :Any = wi[1].T else: lowerCAmelCase_ :Tuple = wi.T lowerCAmelCase_ :List[str] = wo.T lowerCAmelCase_ :Optional[Any] = old["""decoder/decoder_norm/scale"""] lowerCAmelCase_ :Optional[Any] = old[ """decoder/relpos_bias/rel_embedding""" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: lowerCAmelCase_ :Tuple = old["""decoder/logits_dense/kernel"""].T return new def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : bool ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Optional[int] = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: lowerCAmelCase_ :Tuple = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. 
print("""Using shared word embeddings as lm_head.""" ) lowerCAmelCase_ :Any = state_dict["""shared.weight"""] return state_dict def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :List[Any] = checkpoints.load_tax_checkpoint(lowercase__ ) lowerCAmelCase_ :Optional[int] = convert_tax_to_pytorch(lowercase__ , num_layers=config.num_layers , is_encoder_only=lowercase__ ) lowerCAmelCase_ :Union[str, Any] = make_state_dict(lowercase__ , lowercase__ ) model.load_state_dict(lowercase__ , strict=lowercase__ ) def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : str , lowercase__ : bool = False ) -> Any: '''simple docstring''' lowerCAmelCase_ :Any = TaConfig.from_json_file(lowercase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: lowerCAmelCase_ :List[Any] = TaEncoderModel(lowercase__ ) else: lowerCAmelCase_ :List[str] = TaForConditionalGeneration(lowercase__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(lowercase__ ) # Verify that we can load the checkpoint. model.from_pretrained(lowercase__ ) print("""Done""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' 
) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) __UpperCAmelCase = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
84
0
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def lowerCAmelCase__ ( ) -> str: '''simple docstring''' A__ = HfArgumentParser(lowercase__ ) A__ = parser.parse_args_into_dataclasses()[0] A__ = TensorFlowBenchmark(args=lowercase__ ) try: A__ = parser.parse_args_into_dataclasses()[0] except ValueError as e: A__ = """Arg --no_{0} is no longer used, please use --no-{0} instead.""" A__ = """ """.join(str(lowercase__ ).split(" " )[:-1] ) A__ = """""" A__ = eval(str(lowercase__ ).split(" " )[-1] ) A__ = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(lowercase__ ) if len(lowercase__ ) > 0: A__ = full_error_msg + begin_error_msg + str(lowercase__ ) raise ValueError(lowercase__ ) benchmark.run() if __name__ == "__main__": main()
68
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( lowercase__ : Optional[Any] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): lowerCAmelCase_ :Union[str, Any] = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): lowerCAmelCase_ :Any = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase_ :List[str] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] lowerCAmelCase_ :Tuple = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowercase__ )-1}""" ) if "norm" in key: lowerCAmelCase_ :Dict = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase_ :str = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] lowerCAmelCase_ :str = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowercase__ )-1}""" ) if "layer_norm1" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: lowerCAmelCase_ :str = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase_ :List[str] = key[key.find("""block""" ) + len("""block""" )] lowerCAmelCase_ :int = key.replace(f"""block{idx}""" , f"""block.{int(lowercase__ )-1}""" ) if "attn.q" in key: lowerCAmelCase_ :Tuple = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: 
lowerCAmelCase_ :Optional[int] = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: lowerCAmelCase_ :str = key.replace("""attn""" , """attention.self""" ) if "fc1" in key: lowerCAmelCase_ :List[Any] = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: lowerCAmelCase_ :List[str] = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: lowerCAmelCase_ :str = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) lowerCAmelCase_ :Any = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase_ :str = key[key.find("""linear_c""" ) + len("""linear_c""" )] lowerCAmelCase_ :Optional[int] = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowercase__ )-1}""" ) if "bot_conv" in key: lowerCAmelCase_ :Union[str, Any] = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: lowerCAmelCase_ :int = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: lowerCAmelCase_ :str = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: lowerCAmelCase_ :Any = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: lowerCAmelCase_ :List[str] = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: lowerCAmelCase_ :Dict = key.replace("""fusion3""" , """3.fusion""" ) if "fusion" in key and "conv" in key: lowerCAmelCase_ :Any = key.replace("""conv""" , """convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): lowerCAmelCase_ :Tuple = key.replace("""module.last_layer_depth""" , """head.head""" ) lowerCAmelCase_ :List[Any] = value return new_state_dict def _snake_case ( lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values 
(which is a single matrix in the original implementation) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict lowerCAmelCase_ :Optional[Any] = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase_ :Union[str, Any] = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase_ :List[Any] = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase_ :int = kv_bias[config.hidden_sizes[i] :] def _snake_case ( ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCAmelCase_ :Optional[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return image @torch.no_grad() def _snake_case ( lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Dict=False , lowercase__ : List[Any]=None ) -> int: '''simple docstring''' lowerCAmelCase_ :int = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] ) # load image processor (only resize + rescale) lowerCAmelCase_ :Union[str, Any] = GLPNImageProcessor() # prepare image lowerCAmelCase_ :List[Any] = prepare_img() lowerCAmelCase_ :int = image_processor(images=lowercase__ , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict lowerCAmelCase_ :Tuple = torch.load(lowercase__ , map_location=torch.device("""cpu""" ) ) # rename keys lowerCAmelCase_ :Union[str, Any] = rename_keys(lowercase__ ) # key and value matrices need special treatment read_in_k_v(lowercase__ , lowercase__ ) # create HuggingFace model and load state dict lowerCAmelCase_ :List[Any] = GLPNForDepthEstimation(lowercase__ ) model.load_state_dict(lowercase__ ) model.eval() # forward pass lowerCAmelCase_ :Dict = model(lowercase__ ) lowerCAmelCase_ :Tuple = outputs.predicted_depth # verify output 
if model_name is not None: if "nyu" in model_name: lowerCAmelCase_ :Optional[Any] = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: lowerCAmelCase_ :Any = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(f"""Unknown model name: {model_name}""" ) lowerCAmelCase_ :Union[str, Any] = torch.Size([1, 4_8_0, 6_4_0] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , lowercase__ , atol=1E-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowercase__ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowercase__ , ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) parser.add_argument( '--model_name', default='glpn-kitti', type=str, help='Name of the model in case you\'re pushing to the hub.', ) __UpperCAmelCase = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
84
0
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _snake_case ( A__ , unittest.TestCase ): '''simple docstring''' A__ : Tuple = LEDTokenizer A__ : Tuple = LEDTokenizerFast A__ : Optional[Any] = True def A__ ( self: List[Any] ) -> Optional[Any]: super().setUp() UpperCAmelCase_ : Optional[Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] UpperCAmelCase_ : List[Any] = dict(zip(__A ,range(len(__A ) ) ) ) UpperCAmelCase_ : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCAmelCase_ : Optional[int] = {"""unk_token""": """<unk>"""} UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(__A ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__A ) ) def A__ ( self: List[str] ,**lowerCamelCase_: Tuple ) -> Dict: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname ,**__A ) def A__ ( self: Optional[Any] ,**lowerCamelCase_: Union[str, Any] ) -> Any: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**__A ) def A__ ( self: List[str] ,lowerCamelCase_: Any ) -> Optional[int]: return "lower newer", 
"lower newer" @cached_property def A__ ( self: Dict ) -> str: return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def A__ ( self: Any ) -> Tuple: return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def A__ ( self: Union[str, Any] ) -> Dict: UpperCAmelCase_ : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] UpperCAmelCase_ : Optional[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ : Dict = tokenizer(__A ,max_length=len(__A ) ,padding=__A ,return_tensors="""pt""" ) self.assertIsInstance(__A ,__A ) self.assertEqual((2, 9) ,batch.input_ids.shape ) self.assertEqual((2, 9) ,batch.attention_mask.shape ) UpperCAmelCase_ : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(__A ,__A ) @require_torch def A__ ( self: List[str] ) -> Tuple: UpperCAmelCase_ : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ : Optional[Any] = tokenizer(__A ,padding=__A ,return_tensors="""pt""" ) self.assertIn("""input_ids""" ,__A ) self.assertIn("""attention_mask""" ,__A ) self.assertNotIn("""labels""" ,__A ) self.assertNotIn("""decoder_attention_mask""" ,__A ) @require_torch def A__ ( self: Any ) -> Dict: UpperCAmelCase_ : Union[str, Any] = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ : Optional[int] = tokenizer(text_target=__A ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" ) self.assertEqual(32 ,targets["""input_ids"""].shape[1] ) @require_torch def A__ ( self: Optional[int] ) -> Tuple: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ : Union[str, Any] = tokenizer( ["""I am a small frog""" * 
1024, """I am a small frog"""] ,padding=__A ,truncation=__A ,return_tensors="""pt""" ) self.assertIsInstance(__A ,__A ) self.assertEqual(batch.input_ids.shape ,(2, 5122) ) @require_torch def A__ ( self: Union[str, Any] ) -> Dict: UpperCAmelCase_ : Any = ["""A long paragraph for summarization."""] UpperCAmelCase_ : Union[str, Any] = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ : Any = tokenizer(__A ,return_tensors="""pt""" ) UpperCAmelCase_ : Union[str, Any] = tokenizer(text_target=__A ,return_tensors="""pt""" ) UpperCAmelCase_ : Any = inputs["""input_ids"""] UpperCAmelCase_ : Dict = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def A__ ( self: List[Any] ) -> str: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ : Optional[int] = ["""Summary of the text.""", """Another summary."""] UpperCAmelCase_ : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] UpperCAmelCase_ : Optional[int] = tokenizer(__A ,padding=__A ) UpperCAmelCase_ : Optional[Any] = [[0] * len(__A ) for x in encoded_output["""input_ids"""]] UpperCAmelCase_ : Dict = tokenizer.pad(__A ) self.assertSequenceEqual(outputs["""global_attention_mask"""] ,__A ) def A__ ( self: Tuple ) -> int: pass def A__ ( self: Union[str, Any] ) -> Optional[int]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase_ : int = self.rust_tokenizer_class.from_pretrained(__A ,**__A ) UpperCAmelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(__A ,**__A ) UpperCAmelCase_ : Tuple = """A, <mask> AllenNLP 
sentence.""" UpperCAmelCase_ : Optional[Any] = tokenizer_r.encode_plus(__A ,add_special_tokens=__A ,return_token_type_ids=__A ) UpperCAmelCase_ : List[Any] = tokenizer_p.encode_plus(__A ,add_special_tokens=__A ,return_token_type_ids=__A ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,) UpperCAmelCase_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) UpperCAmelCase_ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( __A ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( __A ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
345
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
84
0
'''simple docstring''' from typing import Any class a_ : def __init__( self , snake_case_ ): _lowerCAmelCase : List[str] = data _lowerCAmelCase : Union[str, Any] = None class a_ : def __init__( self ): _lowerCAmelCase : Tuple = None def __UpperCamelCase ( self ): _lowerCAmelCase : Tuple = self.head while temp is not None: print(temp.data , end=""" """ ) _lowerCAmelCase : str = temp.next print() def __UpperCamelCase ( self , snake_case_ ): _lowerCAmelCase : str = Node(__A ) _lowerCAmelCase : int = self.head _lowerCAmelCase : Optional[int] = new_node def __UpperCamelCase ( self , snake_case_ , snake_case_ ): if node_data_a == node_data_a: return else: _lowerCAmelCase : Optional[int] = self.head while node_a is not None and node_a.data != node_data_a: _lowerCAmelCase : Optional[int] = node_a.next _lowerCAmelCase : Any = self.head while node_a is not None and node_a.data != node_data_a: _lowerCAmelCase : List[str] = node_a.next if node_a is None or node_a is None: return _lowerCAmelCase : Optional[Any] = node_a.data, node_a.data if __name__ == "__main__": UpperCamelCase_ = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print("""After swapping""") ll.print_list()
309
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json', # See all LeViT models at https://huggingface.co/models?filter=levit } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "levit" def __init__( self , __A=224 , __A=3 , __A=3 , __A=2 , __A=1 , __A=16 , __A=[128, 256, 384] , __A=[4, 8, 12] , __A=[4, 4, 4] , __A=[16, 16, 16] , __A=0 , __A=[2, 2, 2] , __A=[2, 2, 2] , __A=0.0_2 , **__A , ) -> Any: super().__init__(**__A ) lowerCAmelCase_ :Tuple = image_size lowerCAmelCase_ :Optional[int] = num_channels lowerCAmelCase_ :Union[str, Any] = kernel_size lowerCAmelCase_ :Optional[Any] = stride lowerCAmelCase_ :Optional[int] = padding lowerCAmelCase_ :Optional[Any] = hidden_sizes lowerCAmelCase_ :Optional[int] = num_attention_heads lowerCAmelCase_ :int = depths lowerCAmelCase_ :List[str] = key_dim lowerCAmelCase_ :str = drop_path_rate lowerCAmelCase_ :Optional[int] = patch_size lowerCAmelCase_ :Union[str, Any] = attention_ratio lowerCAmelCase_ :Dict = mlp_ratio lowerCAmelCase_ :Any = initializer_range lowerCAmelCase_ :Optional[int] = [ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Tuple = version.parse("1.11" ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-4
84
0
'''simple docstring''' from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES lowercase__ = "tiny-wmt19-en-ru" # Build # borrowed from a test lowercase__ = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] lowercase__ = dict(zip(vocab, range(len(vocab)))) lowercase__ = ["l o 123", "lo w 1456", "e r</w> 1789", ""] with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ = Path(tmpdirname) lowercase__ = build_dir / VOCAB_FILES_NAMES["src_vocab_file"] lowercase__ = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"] lowercase__ = build_dir / VOCAB_FILES_NAMES["merges_file"] with open(src_vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, "w") as fp: fp.write("\n".join(merges)) lowercase__ = FSMTTokenizer( langs=["en", "ru"], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) lowercase__ = FSMTConfig( langs=["ru", "en"], src_vocab_size=1000, tgt_vocab_size=1000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) lowercase__ = FSMTForConditionalGeneration(config) print(f'''num of params {tiny_model.num_parameters()}''') # Test lowercase__ = tokenizer(["Making tiny model"], return_tensors="pt") lowercase__ = tiny_model(**batch) print("test output:", len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(f'''Generated {mname_tiny}''') # Upload # transformers-cli upload tiny-wmt19-en-ru
151
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def _snake_case ( lowercase__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Union[str, Any] = FileLock(str(tmpdir / """foo.lock""" ) ) lowerCAmelCase_ :Dict = 0.01 with locka.acquire(): with pytest.raises(lowercase__ ): lowerCAmelCase_ :List[Any] = time.time() locka.acquire(lowercase__ ) assert time.time() - _start > timeout def _snake_case ( lowercase__ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = """a""" * 1_0_0_0 + """.lock""" lowerCAmelCase_ :Optional[Any] = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(""".lock""" ) assert not locka._lock_file.endswith(lowercase__ ) assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5 lowerCAmelCase_ :Any = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(lowercase__ ): locka.acquire(0 )
84
0
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): return 1 if input_a == input_a else 0 def lowerCamelCase__ (): assert xnor_gate(0 , 0) == 1 assert xnor_gate(0 , 1) == 0 assert xnor_gate(1 , 0) == 0 assert xnor_gate(1 , 1) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
137
"""simple docstring""" from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __UpperCAmelCase = 1.054571817e-34 # unit of ℏ : J * s __UpperCAmelCase = 3e8 # unit of c : m * s^-1 def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> dict[str, float]: '''simple docstring''' if (force, area, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if force < 0: raise ValueError("""Magnitude of force can not be negative""" ) if distance < 0: raise ValueError("""Distance can not be negative""" ) if area < 0: raise ValueError("""Area can not be negative""" ) if force == 0: lowerCAmelCase_ :Union[str, Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_4_0 * (distance) ** 4 ) return {"force": force} elif area == 0: lowerCAmelCase_ :Optional[Any] = (2_4_0 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: lowerCAmelCase_ :Any = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("""One and only one argument must be 0""" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
84
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ : List[Any] = { '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Optional[int] = [ '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimesformerModel''', '''TimesformerForVideoClassification''', '''TimesformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys lowercase__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
338
"""simple docstring""" def _snake_case ( lowercase__ : str , lowercase__ : str ) -> int: '''simple docstring''' if len(lowercase__ ) != len(lowercase__ ): raise ValueError("""String lengths must match!""" ) lowerCAmelCase_ :Optional[int] = 0 for chara, chara in zip(lowercase__ , lowercase__ ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
84
0
import unittest

from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaForMaskedLM,
        DebertaForQuestionAnswering,
        DebertaForSequenceClassification,
        DebertaForTokenClassification,
        DebertaModel,
    )
    from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST


class DebertaModelTester:
    """Builds tiny DeBERTa configs/inputs and shape-checks every task head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random token ids, masks and labels plus a matching config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        # Pipelines need a larger vocab than the shape tests; override it.
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        # A scalar loss has an empty size tuple.
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(
            list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size]
        )

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
244
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""
    Wraps several `ControlNetModel`s so they can be used as a single
    control net: each net is run on its own conditioning image/scale and
    the residual outputs are summed.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        # ModuleList so sub-nets are registered (moved with .to(), saved, etc.).
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        """Run every sub-net on its (image, scale) pair and sum the residuals."""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples: first net initializes, later nets accumulate
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        """
        Save each sub-net under `save_directory`, `save_directory_1`,
        `save_directory_2`, ... so `from_pretrained` can rediscover them.
        """
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        """
        Load controlnets from `pretrained_model_path`, `pretrained_model_path_1`,
        ... until a directory is missing, then wrap them in a MultiControlNetModel.
        """
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least "
                f"{pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
84
0
import argparse
import logging
import sys
from unittest.mock import patch

import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    """Return the value of the ``-f`` CLI flag (the setup file path)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    """End-to-end smoke tests for the DeeBERT example scripts."""

    def setup(self) -> None:
        # Mirror script logs to stdout so pytest captures them.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        """Run run_glue_deebert.main() with ``args`` as argv and check metrics."""
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
272
"""Shift the brightness of a PIL image by a constant offset."""
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Return a copy of *img* with every channel value shifted by *level*.

    Args:
        img: source PIL image.
        level: brightness offset in [-255.0, 255.0]; negative darkens,
            positive lightens.

    Raises:
        ValueError: if ``level`` is outside [-255.0, 255.0].
    """

    def brightness(c: int) -> float:
        # Image.point calls this once per channel value c in [0, 255];
        # the form 128 + level + (c - 128) keeps the offset centred.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    # Pass the callback itself, not the level, to point().
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
84
0